linux/arch/x86/include/asm/pgtable_types.h
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <asm/page_types.h>

#define FIRST_USER_ADDRESS      0UL

#define _PAGE_BIT_PRESENT       0       /* is present */
#define _PAGE_BIT_RW            1       /* writeable */
#define _PAGE_BIT_USER          2       /* userspace addressable */
#define _PAGE_BIT_PWT           3       /* page write through */
#define _PAGE_BIT_PCD           4       /* page cache disabled */
#define _PAGE_BIT_ACCESSED      5       /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY         6       /* was written to (raised by CPU) */
#define _PAGE_BIT_PSE           7       /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT           7       /* on 4KB pages */
#define _PAGE_BIT_GLOBAL        8       /* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1        9       /* available for programmer */
#define _PAGE_BIT_SOFTW2        10      /* " */
#define _PAGE_BIT_SOFTW3        11      /* " */
#define _PAGE_BIT_PAT_LARGE     12      /* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4        58      /* available for programmer */
#define _PAGE_BIT_PKEY_BIT0     59      /* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1     60      /* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2     61      /* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3     62      /* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX            63      /* No execute: only valid after cpuid check */

#define _PAGE_BIT_SPECIAL       _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST      _PAGE_BIT_SOFTW1
#define _PAGE_BIT_HIDDEN        _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY    _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP        _PAGE_BIT_SOFTW4

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE      _PAGE_BIT_GLOBAL

#define _PAGE_PRESENT   (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW        (_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER      (_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT       (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD       (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED  (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY     (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE       (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL    (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1    (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2    (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_PAT       (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL   (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST  (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0))
#endif
#define __HAVE_ARCH_PTE_SPECIAL

#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
                         _PAGE_PKEY_BIT1 | \
                         _PAGE_PKEY_BIT2 | \
                         _PAGE_PKEY_BIT3)

#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN    (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_HIDDEN    (_AT(pteval_t, 0))
#endif

/*
 * The same hidden bit is shared by kmemcheck and soft-dirty tracking,
 * but since kmemcheck works on kernel pages while the soft-dirty engine
 * works on user-space pages, the two users do not conflict.
 */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY        (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY        (_AT(pteval_t, 0))
#endif

/*
 * Tracking the soft-dirty bit when a page goes to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86 bits 6 and 7 are *not* involved
 * in swap entry computation, but bit 6 is used for nonlinear file
 * mapping, so we borrow bit 7 (_PAGE_PSE) for soft-dirty tracking.
 *
 * Please note that this bit must be treated as the swap soft-dirty
 * mark if and only if the PTE has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY    _PAGE_PSE
#else
#define _PAGE_SWP_SOFT_DIRTY    (_AT(pteval_t, 0))
#endif
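
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): the swap variant of the soft-dirty bit is only meaningful
 * for non-present entries, so a check would look roughly like:
 *
 *	if (!(val & _PAGE_PRESENT) && (val & _PAGE_SWP_SOFT_DIRTY))
 *		... the swapped-out page is soft-dirty ...
 */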

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX        (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP    (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define __HAVE_ARCH_PTE_DEVMAP
#else
#define _PAGE_NX        (_AT(pteval_t, 0))
#define _PAGE_DEVMAP    (_AT(pteval_t, 0))
#endif

#define _PAGE_PROTNONE  (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |        \
                         _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
                         _PAGE_DIRTY)

/*
 * Set of bits not changed in pte_modify.  The pte's
 * protection key is treated like _PAGE_RW, for
 * instance, and is *not* included in this mask since
 * pte_modify() does modify it.
 */
#define _PAGE_CHG_MASK  (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |         \
                         _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
                         _PAGE_SOFT_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
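
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): pte_modify() in <asm/pgtable.h> conceptually recombines an
 * existing pte with a new protection value roughly like this, keeping
 * the bits listed in _PAGE_CHG_MASK from the old entry:
 *
 *	newval = (oldval & _PAGE_CHG_MASK) |
 *		 (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
 */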

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
        _PAGE_CACHE_MODE_WB = 0,
        _PAGE_CACHE_MODE_WC = 1,
        _PAGE_CACHE_MODE_UC_MINUS = 2,
        _PAGE_CACHE_MODE_UC = 3,
        _PAGE_CACHE_MODE_WT = 4,
        _PAGE_CACHE_MODE_WP = 5,
        _PAGE_CACHE_MODE_NUM = 8
};
#endif

#define _PAGE_CACHE_MASK        (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
#define _PAGE_NOCACHE           (cachemode2protval(_PAGE_CACHE_MODE_UC))

#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                                 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC        __pgprot(_PAGE_PRESENT | _PAGE_RW |     \
                                         _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC        __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC          __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED)
#define PAGE_COPY               PAGE_COPY_NOEXEC
#define PAGE_READONLY           __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC      __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC                                              \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL           (__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO                (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX                (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE           (__PAGE_KERNEL | _PAGE_NOCACHE)
#define __PAGE_KERNEL_VSYSCALL          (__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR              (__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE             (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC        (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO                (__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE        (__PAGE_KERNEL_NOCACHE)

#define PAGE_KERNEL                     __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO                  __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC                __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX                  __pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE             __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE               __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC          __pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL            __pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VVAR                __pgprot(__PAGE_KERNEL_VVAR)

#define PAGE_KERNEL_IO                  __pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE          __pgprot(__PAGE_KERNEL_IO_NOCACHE)

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
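
/*
 * Illustrative example (editor's addition, not part of the original
 * header): the __Pxwr/__Sxwr index encodes the requested
 * execute/write/read bits.  A private mapping asking for
 * PROT_READ|PROT_WRITE (x=0, w=1, r=1) therefore selects
 * __P011 == PAGE_COPY, i.e. a copy-on-write, non-executable protection,
 * while the shared variant selects __S011 == PAGE_SHARED.
 */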

/*
 * Early identity-mapping pte attribute macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC  __PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR   0x003          /* PRESENT+RW */
#define PDE_IDENT_ATTR   0x063          /* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR   0x001          /* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK            ((pteval_t)PHYSICAL_PAGE_MASK)

/*
 *  Extracts the flags from a (pte|pmd|pud|pgd)val_t
 *  This includes the protection key value.
 */
#define PTE_FLAGS_MASK          (~PTE_PFN_MASK)
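
/*
 * Illustrative example (editor's addition, not part of the original
 * header): a page table entry value splits into a physical frame part
 * and a flags part along these two masks, roughly:
 *
 *	phys  = val & PTE_PFN_MASK;	// physical address of the 4KB frame
 *	flags = val & PTE_FLAGS_MASK;	// everything else, incl. pkey bits
 */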

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgd_t native_make_pgd(pgdval_t val)
{
        return (pgd_t) { val };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
        return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
        return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pudval_t val)
{
        return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
        return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pudval_t native_pud_val(pud_t pud)
{
        return native_pgd_val(pud.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
        return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
        return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
        return native_pgd_val(pmd.pud.pgd);
}
#endif

static inline pudval_t pud_pfn_mask(pud_t pud)
{
        if (native_pud_val(pud) & _PAGE_PSE)
                return PHYSICAL_PUD_PAGE_MASK;
        else
                return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
        return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
        return native_pud_val(pud) & pud_flags_mask(pud);
}

static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
        if (native_pmd_val(pmd) & _PAGE_PSE)
                return PHYSICAL_PMD_PAGE_MASK;
        else
                return PTE_PFN_MASK;
}

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
        return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
        return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}

static inline pte_t native_make_pte(pteval_t val)
{
        return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
        return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
        return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define pgprot_val(x)   ((x).pgprot)
#define __pgprot(x)     ((pgprot_t) { (x) } )

extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
extern uint8_t __pte2cachemode_tbl[8];

#define __pte2cm_idx(cb)                                \
        ((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |          \
         (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |          \
         (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)                                 \
        ((((i) & 4) << (_PAGE_BIT_PAT - 2)) |           \
         (((i) & 2) << (_PAGE_BIT_PCD - 1)) |           \
         (((i) & 1) << _PAGE_BIT_PWT))
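
/*
 * Illustrative example (editor's addition, not part of the original
 * header): the table index packs the three cache-control bits as
 * (PAT << 2) | (PCD << 1) | PWT, so for instance
 * __pte2cm_idx(_PAGE_PCD | _PAGE_PWT) == 3, and __cm_idx2pte(3) gives
 * back _PAGE_PCD | _PAGE_PWT.
 */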

static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
{
        if (likely(pcm == 0))
                return 0;
        return __cachemode2pte_tbl[pcm];
}
static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
        return __pgprot(cachemode2protval(pcm));
}
static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
        unsigned long masked;

        masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
        if (likely(masked == 0))
                return 0;
        return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
        pgprotval_t val = pgprot_val(pgprot);
        pgprot_t new;

        pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
        return new;
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
        pgprotval_t val = pgprot_val(pgprot);
        pgprot_t new;

        pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                          ((val & _PAGE_PAT_LARGE) >>
                           (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
        return new;
}
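
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header; the helper name is hypothetical): one way the pieces above
 * combine is to fold a PAT-backed cache mode into a base kernel
 * protection, e.g. to describe a write-combining kernel mapping.
 */
static inline pgprot_t __example_kernel_wc_prot(void)
{
        /* base kernel bits plus the PWT/PCD/PAT pattern for WC */
        return __pgprot(__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC));
}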


typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine     pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough     pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t *vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
        PG_LEVEL_NONE,
        PG_LEVEL_4K,
        PG_LEVEL_2M,
        PG_LEVEL_1G,
        PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
                                    unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
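
/*
 * Illustrative usage sketch (editor's addition, not part of the
 * original header): callers typically pass a level out-parameter and
 * check it before dereferencing, since for a PSE mapping the returned
 * "pte" is really the covering pmd/pud entry, e.g.:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_flags(*pte) & _PAGE_PRESENT) && level == PG_LEVEL_2M)
 *		... addr is covered by a present 2MB mapping ...
 */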
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                                   unsigned numpages, unsigned long page_flags);
void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
                               unsigned numpages);
#endif  /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */