linux/arch/x86/include/asm/pgtable_types.h
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <asm/page_types.h>

#define FIRST_USER_ADDRESS      0UL

#define _PAGE_BIT_PRESENT       0       /* is present */
#define _PAGE_BIT_RW            1       /* writeable */
#define _PAGE_BIT_USER          2       /* userspace addressable */
#define _PAGE_BIT_PWT           3       /* page write through */
#define _PAGE_BIT_PCD           4       /* page cache disabled */
#define _PAGE_BIT_ACCESSED      5       /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY         6       /* was written to (raised by CPU) */
#define _PAGE_BIT_PSE           7       /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT           7       /* on 4KB pages */
#define _PAGE_BIT_GLOBAL        8       /* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1        9       /* available for programmer */
#define _PAGE_BIT_SOFTW2        10      /* " */
#define _PAGE_BIT_SOFTW3        11      /* " */
#define _PAGE_BIT_PAT_LARGE     12      /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL       _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST      _PAGE_BIT_SOFTW1
#define _PAGE_BIT_SPLITTING     _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */
#define _PAGE_BIT_HIDDEN        _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY    _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX            63      /* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE      _PAGE_BIT_GLOBAL

#define _PAGE_PRESENT   (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW        (_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER      (_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT       (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD       (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED  (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY     (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE       (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL    (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1    (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2    (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_PAT       (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL   (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST  (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define _PAGE_SPLITTING (_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
#define __HAVE_ARCH_PTE_SPECIAL
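
/*
 * Illustrative sketch, not part of the kernel API: decoding a raw pte
 * value by hand.  A present, writable page that the CPU has both
 * accessed and dirtied carries
 *
 *	_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY == 0x63
 *
 * i.e. bits 0, 1, 5 and 6, which is the same value the 32-bit early
 * identity mapping uses below as PDE_IDENT_ATTR (0x063).
 */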

#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN    (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_HIDDEN    (_AT(pteval_t, 0))
#endif

/*
 * The same hidden bit is used by kmemcheck, but since kmemcheck
 * works on kernel pages while the soft-dirty engine works on user
 * space memory, they do not conflict with each other.
 */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY        (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY        (_AT(pteval_t, 0))
#endif

/*
 * Tracking the soft-dirty bit when a page is swapped out is tricky.
 * We need a bit which can be stored in the pte _and_ which does not
 * conflict with the swap entry format. On x86 bits 6 and 7 are *not*
 * involved in the swap entry computation, but bit 6 is used for
 * nonlinear file mapping, so we borrow bit 7 for soft dirty tracking.
 *
 * Please note that this bit must be treated as the swap soft-dirty
 * page mark if and only if the PTE has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY    _PAGE_PSE
#else
#define _PAGE_SWP_SOFT_DIRTY    (_AT(pteval_t, 0))
#endif
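
/*
 * Hedged worked example of the bit borrowing above (the swap entry
 * layout itself lives in pgtable_64.h): a swapped-out pte is marked
 * soft-dirty with something like
 *
 *	swp_pte = pte_swp_mksoft_dirty(swp_pte);
 *
 * which just sets bit 7.  Because _PAGE_PRESENT is clear in every
 * swap pte, bit 7 can never be misread as _PAGE_PSE there.
 */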

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX        (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX        (_AT(pteval_t, 0))
#endif

#define _PAGE_PROTNONE  (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |        \
                         _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
                         _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK  (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |         \
                         _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
                         _PAGE_SOFT_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
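
/*
 * Illustrative note on the mask above: pte_modify() in pgtable.h
 * applies a new protection roughly as
 *
 *	val = (val & _PAGE_CHG_MASK) |
 *	      (pgprot_val(newprot) & ~(pteval_t)_PAGE_CHG_MASK);
 *
 * so an mprotect() can flip RW/NX/USER while the PFN, the cache bits
 * and the accessed/dirty/soft-dirty state survive unchanged.
 */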

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
        _PAGE_CACHE_MODE_WB = 0,
        _PAGE_CACHE_MODE_WC = 1,
        _PAGE_CACHE_MODE_UC_MINUS = 2,
        _PAGE_CACHE_MODE_UC = 3,
        _PAGE_CACHE_MODE_WT = 4,
        _PAGE_CACHE_MODE_WP = 5,
        _PAGE_CACHE_MODE_NUM = 8
};
#endif

#define _PAGE_CACHE_MASK        (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
#define _PAGE_NOCACHE           (cachemode2protval(_PAGE_CACHE_MODE_UC))
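
/*
 * Sketch, assuming the default PAT MSR layout programmed by
 * pat_init(): WB is index 0 and costs no pte bits, while e.g.
 *
 *	cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)
 *
 * typically resolves to just _PAGE_PCD (PAT entry 2).  Callers should
 * go through the cachemode helpers rather than set PWT/PCD/PAT by
 * hand, because reprogramming the PAT MSR changes what a given bit
 * pattern means.
 */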

#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                                 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC        __pgprot(_PAGE_PRESENT | _PAGE_RW |     \
                                         _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC        __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC          __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED)
#define PAGE_COPY               PAGE_COPY_NOEXEC
#define PAGE_READONLY           __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC      __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC                                              \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL           (__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO                (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX                (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE           (__PAGE_KERNEL | _PAGE_NOCACHE)
#define __PAGE_KERNEL_VSYSCALL          (__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR              (__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE             (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC        (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO                (__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE        (__PAGE_KERNEL_NOCACHE)

#define PAGE_KERNEL                     __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO                  __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC                __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX                  __pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE             __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE               __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC          __pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL            __pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VVAR                __pgprot(__PAGE_KERNEL_VVAR)

#define PAGE_KERNEL_IO                  __pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE          __pgprot(__PAGE_KERNEL_IO_NOCACHE)
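
/*
 * Illustrative consumers (not an exhaustive list): vmalloc() maps its
 * pages with PAGE_KERNEL, module text is allocated with
 * PAGE_KERNEL_EXEC, and the IO mapping paths use the PAGE_KERNEL_IO
 * variants, which are currently identical to the non-IO ones on x86.
 */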

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
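
/*
 * Example of how these tables are consumed: the generic mm code
 * indexes protection_map[] with the low mmap prot bits, so a
 * MAP_SHARED PROT_READ|PROT_WRITE mapping resolves to __S011
 * (PAGE_SHARED), while the same prot on a MAP_PRIVATE mapping
 * resolves to __P011 (PAGE_COPY), i.e. write-protected until
 * copy-on-write faults make the pages private.
 */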

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC  __PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR   0x003          /* PRESENT+RW */
#define PDE_IDENT_ATTR   0x063          /* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR   0x001          /* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK            ((pteval_t)PHYSICAL_PAGE_MASK)

/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_FLAGS_MASK          (~PTE_PFN_MASK)

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgd_t native_make_pgd(pgdval_t val)
{
        return (pgd_t) { val };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
        return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
        return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pudval_t val)
{
        return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
        return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pudval_t native_pud_val(pud_t pud)
{
        return native_pgd_val(pud.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
        return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
        return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
        return native_pgd_val(pmd.pud.pgd);
}
#endif

static inline pudval_t pud_pfn_mask(pud_t pud)
{
        if (native_pud_val(pud) & _PAGE_PSE)
                return PHYSICAL_PUD_PAGE_MASK;
        else
                return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
        return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
        return native_pud_val(pud) & pud_flags_mask(pud);
}

static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
        if (native_pmd_val(pmd) & _PAGE_PSE)
                return PHYSICAL_PMD_PAGE_MASK;
        else
                return PTE_PFN_MASK;
}

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
        return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
        return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}
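
/*
 * Why the conditional masks above matter (illustrative): in a 2M pmd
 * the PFN only starts at bit 21, and bit 12 holds _PAGE_PAT_LARGE
 * rather than an address bit.  Masking with PTE_FLAGS_MASK alone
 * would therefore drop the PAT bit of a PSE pmd; pmd_flags() keeps
 * it by deriving the flags mask from pmd_pfn_mask().
 */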

static inline pte_t native_make_pte(pteval_t val)
{
        return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
        return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
        return native_pte_val(pte) & PTE_FLAGS_MASK;
}
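
/*
 * Minimal usage sketch for the native pte helpers (illustrative,
 * error handling elided):
 *
 *	pte_t pte = native_make_pte(((pteval_t)pfn << PAGE_SHIFT) |
 *				    __PAGE_KERNEL);
 *	pteval_t flags = pte_flags(pte);		(the __PAGE_KERNEL bits)
 *	pteval_t paddr = native_pte_val(pte) & PTE_PFN_MASK;
 */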

#define pgprot_val(x)   ((x).pgprot)
#define __pgprot(x)     ((pgprot_t) { (x) } )

extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
extern uint8_t __pte2cachemode_tbl[8];

#define __pte2cm_idx(cb)                                \
        ((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |          \
         (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |          \
         (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)                                 \
        ((((i) & 4) << (_PAGE_BIT_PAT - 2)) |           \
         (((i) & 2) << (_PAGE_BIT_PCD - 1)) |           \
         (((i) & 1) << _PAGE_BIT_PWT))
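
/*
 * Worked example for the bit packing above (PAT = bit 7, PCD = bit 4,
 * PWT = bit 3): a pte value with all three cache bits set is 0x98, and
 *
 *	__pte2cm_idx(0x98) == 7		(binary PAT,PCD,PWT = 111)
 *	__cm_idx2pte(7)    == 0x98
 *
 * so the two macros are exact inverses on the three cache bits.
 */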

static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
{
        if (likely(pcm == 0))
                return 0;
        return __cachemode2pte_tbl[pcm];
}
static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
        return __pgprot(cachemode2protval(pcm));
}
static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
        unsigned long masked;

        masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
        if (likely(masked == 0))
                return 0;
        return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
        pgprot_t new;
        unsigned long val;

        val = pgprot_val(pgprot);
        pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
        return new;
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
        pgprot_t new;
        unsigned long val;

        val = pgprot_val(pgprot);
        pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                          ((val & _PAGE_PAT_LARGE) >>
                           (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
        return new;
}
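
/*
 * Illustrative round trip for the two helpers above: a 4K pgprot with
 * _PAGE_PAT (bit 7) set becomes a large-page pgprot with bit 7 clear
 * and _PAGE_PAT_LARGE (bit 12) set, because large pages reuse bit 7
 * for _PAGE_PSE.  pgprot_large_2_4k() shifts the bit back, so
 *
 *	pgprot_large_2_4k(pgprot_4k_2_large(p))
 *
 * preserves the PAT bit of p.
 */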


typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine     pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough     pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t *vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
        PG_LEVEL_NONE,
        PG_LEVEL_4K,
        PG_LEVEL_2M,
        PG_LEVEL_1G,
        PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
                                    unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                                   unsigned numpages, unsigned long page_flags);
void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
                               unsigned numpages);
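
/*
 * Hedged usage sketch for lookup_address(), for some kernel virtual
 * address addr (no locking shown):
 *
 *	unsigned int level;
 *	pte_t *ptep = lookup_address(addr, &level);
 *
 * On success, level is PG_LEVEL_4K, PG_LEVEL_2M or PG_LEVEL_1G; for
 * the large levels the controlling pmd/pud is handed back cast to a
 * pte, as the comment above explains, so callers must check level
 * before interpreting the PFN bits.
 */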
#endif  /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */