linux/arch/x86/include/asm/pgtable.h
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)                                  \
        ((boot_cpu_data.x86 > 3)                                \
         ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))  \
         : (prot))
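
/*
 * Usage sketch (illustrative only): a driver mapping device memory
 * into userspace would typically mark the protection uncached before
 * remapping, e.g. in its mmap handler:
 *
 *      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *      io_remap_pfn_range(vma, vma->vm_start, pfn,
 *                         vma->vm_end - vma->vm_start,
 *                         vma->vm_page_prot);
 */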

#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)                                       \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)              native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)                  native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)             native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)                  native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)                  native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#define pmd_update(mm, addr, ptep)              do { } while (0)
#define pmd_update_defer(mm, addr, ptep)        do { } while (0)

#define pgd_val(x)      native_pgd_val(x)
#define __pgd(x)        native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)      native_pud_val(x)
#define __pud(x)        native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)      native_pmd_val(x)
#define __pmd(x)        native_make_pmd(x)
#endif

#define pte_val(x)      native_pte_val(x)
#define __pte(x)        native_make_pte(x)

#define arch_end_context_switch(prev)   do { } while (0)

#endif  /* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
        return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
        return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))
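
/*
 * Illustrative guarded use of the accessors above (they are only
 * meaningful when pte_present() is true):
 *
 *      if (pte_present(pte) && pte_dirty(pte))
 *              page = pte_page(pte);
 */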

static inline int pmd_large(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
        return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}
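
/*
 * The pte_mk*()/pte_clr*() helpers above are pure functions on the
 * pte value and compose naturally; an illustrative combination:
 *
 *      pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 */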

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}
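
/*
 * Example (assuming a CPU without NX support, where
 * __supported_pte_mask has _PAGE_NX cleared): a present protection
 * such as PAGE_KERNEL has its NX bit masked off here, while a
 * non-present value (e.g. a swap entry) passes through untouched.
 */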

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

        return __pte(val);
}
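
/*
 * pte_modify() keeps the PFN and the bits in _PAGE_CHG_MASK (such as
 * the accessed/dirty state) and takes everything else from newprot.
 * Illustrative mprotect-style use (sketch):
 *
 *      pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */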

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmdval_t val = pmd_val(pmd);

        val &= _HPAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

        return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         unsigned long flags,
                                         unsigned long new_flags)
{
        /*
         * PAT type is always WB for untracked ranges, so no need to check.
         */
        if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
                return 1;

        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         */
        if ((flags == _PAGE_CACHE_UC_MINUS &&
             new_flags == _PAGE_CACHE_WB) ||
            (flags == _PAGE_CACHE_WC &&
             new_flags == _PAGE_CACHE_WB)) {
                return 0;
        }

        return 1;
}
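
/*
 * For instance (illustrative), a request for uncached-minus must not
 * be satisfied by an existing write-back mapping:
 *
 *      is_new_memtype_allowed(paddr, size,
 *                             _PAGE_CACHE_UC_MINUS, _PAGE_CACHE_WB)
 *
 * returns 0 (likewise for a write-combine request); other
 * combinations, and untracked ranges, return 1.
 */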

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
                               _PAGE_NUMA);
}

#define pte_accessible pte_accessible
static inline int pte_accessible(pte_t a)
{
        return pte_flags(a) & _PAGE_PRESENT;
}

static inline int pte_hidden(pte_t pte)
{
        return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
        /*
         * Checking for _PAGE_PSE is needed too because
         * split_huge_page will temporarily clear the present bit (but
         * the _PAGE_PSE flag will remain set at all times while the
         * _PAGE_PRESENT bit is clear).
         */
        return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
                                 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
        /* Only check low word on 32-bit platforms, since it might be
           out of sync with upper half. */
        return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)   pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
/*
 * The pmd page can be thought of as an array like this:
 * pmd_t[PTRS_PER_PMD]
 *
 * This function returns the index of the entry in the pmd page which
 * would control the given virtual address.
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * The pte page can be thought of as an array like this:
 * pte_t[PTRS_PER_PTE]
 *
 * This function returns the index of the entry in the pte page which
 * would control the given virtual address.
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
        /* pmd_numa check */
        if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
                return 0;
#endif
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}
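
/*
 * With 4 KiB pages (PAGE_SHIFT == 12) this is npg >> 8, so e.g.
 * 262144 pages -> 1024 MB.
 */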

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)           pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif  /* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)           pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !native_pgd_val(pgd);
}
#endif  /* PAGETABLE_LEVELS > 3 */

#endif  /* __ASSEMBLY__ */

/*
 * The pgd page can be thought of as an array like this:
 * pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which
 * would control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
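
/*
 * Taken together, the *_index()/*_offset() helpers implement a manual
 * page-table walk; a sketch for a mapped kernel address (no huge-page
 * or validity checks shown):
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 */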


#define KERNEL_PGD_BOUNDARY     pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
        pmd_t res = *pmdp;

        native_pmd_clear(pmdp);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
                                     pmd_t *pmdp, pmd_t pmd)
{
        native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#endif
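
/*
 * Illustrative pattern (sketch): a hand-rolled user-PTE modification
 * notifies via pte_update() and then flushes, all under the same page
 * table lock:
 *
 *      clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 *      pte_update(mm, addr, ptep);
 *      ... TLB flush of the address before the lock is dropped ...
 *
 * ptep_set_wrprotect() below follows exactly this shape (the flush is
 * left to the caller).
 */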

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pmd_t *pmdp)
{
        pmd_t pmd = native_pmdp_get_and_clear(pmdp);
        pmd_update(mm, addr, pmdp);
        return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
        pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}
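
/*
 * Illustrative use (this mirrors how a freshly allocated pgd inherits
 * the kernel mappings):
 *
 *      clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *                      KERNEL_PGD_PTRS);
 */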

#include <asm-generic/pgtable.h>
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */