linux/arch/um/include/asm/pgtable.h
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT   0x001
#define _PAGE_NEWPAGE   0x002   /* software: host mapping still needs to be
                                   (un)installed, see set_pte() below */
#define _PAGE_NEWPROT   0x004   /* software: host protection needs updating;
                                   only used on mapped pages */
#define _PAGE_RW        0x020
#define _PAGE_USER      0x040
#define _PAGE_ACCESSED  0x080
#define _PAGE_DIRTY     0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE  0x010   /* if the user mapped it with PROT_NONE;
                                   pte_present gives true */
#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET  (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END     (FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR   VMALLOC_START
#define MODULES_END     VMALLOC_END
#define MODULES_LEN     (MODULES_END - MODULES_VADDR)
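
/*
 * Illustrative only (the numbers below are made up, only the rounding
 * scheme matters): with __va_space = 8MB and end_iomem = 0x20f42000,
 *
 *   VMALLOC_START = (0x20f42000 + 0x800000) & ~0x7fffff = 0x21000000
 *
 * i.e. the vmalloc area begins at the first VMALLOC_OFFSET-aligned
 * address above end_iomem, and runs up to VMALLOC_END just below the
 * fixmap region defined in asm/fixmap.h.
 */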

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC                                              \
         (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC        __pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
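
/*
 * These entries initialize the generic protection_map[] (mm/mmap.c),
 * indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a vma.
 * Illustrative example (not part of this header):
 *
 *   a MAP_PRIVATE PROT_READ|PROT_WRITE mapping gets __P011 = PAGE_COPY,
 *   so the pte is installed without _PAGE_RW and the first write faults
 *   and triggers copy-on-write;
 *
 *   the same mapping with MAP_SHARED gets __S011 = PAGE_SHARED, where
 *   _PAGE_RW is set and writes go straight to the shared page.
 */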

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)     (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)   do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)  pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
        return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_USER)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_USER)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_RW)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_dirty(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
        return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}

static inline int pte_special(pte_t pte)
{
        return 0;
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPROT);
        return(pte);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_DIRTY);
        return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_ACCESSED);
        return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_RW);
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
        pte_set_bits(pte, _PAGE_USER);
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_set_bits(pte, _PAGE_DIRTY);
        return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_set_bits(pte, _PAGE_ACCESSED);
        return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_set_bits(pte, _PAGE_RW);
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_NEWPAGE);
        if(pte_present(pte))
                pte_clear_bits(pte, _PAGE_NEWPROT);
        return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPAGE);
        return(pte);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        pte_copy(*pteptr, pteval);

        /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
         * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
         * mapped pages.
         */

        *pteptr = pte_mknewpage(*pteptr);
        if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
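
/*
 * Simplified sketch of the consumer side of the _PAGE_NEWPAGE/_PAGE_NEWPROT
 * protocol.  The real walk lives in UML's TLB flushing code (fix_range);
 * the helpers named here (host_unmap/host_map/host_protect) are made-up
 * placeholders, not functions defined by this header:
 *
 *   if (pte_newpage(*pte)) {
 *           host_unmap(addr);                      // mapping is stale
 *           if (pte_present(*pte))
 *                   host_map(addr, pte_val(*pte) & PAGE_MASK);
 *   } else if (pte_newprot(*pte)) {
 *           host_protect(addr);                    // only flags changed
 *   }
 *   *pte = pte_mkuptodate(*pte);                   // clear both bits
 */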

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
        ({ pte_t pte;                                   \
                                                        \
        pte_set_val(pte, page_to_phys(page), (pgprot)); \
        if (pte_present(pte))                           \
                pte = pte_mknewprot(pte_mknewpage(pte));\
        pte;})
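
/*
 * Typical use (illustrative only, mirroring the generic fault path;
 * 'vma', 'addr', 'ptep' and 'write_access' are assumed context, not
 * defined in this header):
 *
 *   pte_t pte = mk_pte(page, vma->vm_page_prot);
 *   if (write_access)
 *           pte = pte_mkdirty(pte_mkwrite(pte));
 *   set_pte_at(vma->vm_mm, addr, ptep, pte);
 */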

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
        return pte;
}

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
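
/*
 * Sketch of a lookup built from the macros above, roughly what a
 * virt_to_pte()-style helper has to do (assumes the generic pud/pmd
 * folding helpers and omits the pgd_none()/pud_none()/pmd_none()
 * checks a real walk needs):
 *
 *   pgd_t *pgd = pgd_offset(mm, addr);
 *   pud_t *pud = pud_offset(pgd, addr);
 *   pmd_t *pmd = pmd_offset(pud, addr);
 *   pte_t *pte = pte_offset_kernel(pmd, addr);
 */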

#define update_mmu_cache(vma,address,ptep) do ; while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)                   (((x).val >> 5) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 11)

#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)           \
do {                                            \
        pte_clear(&init_mm, (vaddr), (ptep));   \
        __flush_tlb_one((vaddr));               \
} while (0)

#endif