linux/arch/x86/include/asm/pgtable.h
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)                                  \
        ((boot_cpu_data.x86 > 3)                                \
         ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))  \
         : (prot))
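
/*
 * Illustrative usage (not part of the original header): a driver's mmap
 * handler might request an uncached mapping of device memory roughly like
 * this, assuming "vma", "pfn" and "size" are supplied by the caller:
 *
 *      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *      io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */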

#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)                                       \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)              native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)                  native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)             native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)                  native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)                  native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#define pmd_update(mm, addr, ptep)              do { } while (0)
#define pmd_update_defer(mm, addr, ptep)        do { } while (0)

#define pgd_val(x)      native_pgd_val(x)
#define __pgd(x)        native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)      native_pud_val(x)
#define __pud(x)        native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)      native_pmd_val(x)
#define __pmd(x)        native_make_pmd(x)
#endif

#define pte_val(x)      native_pte_val(x)
#define __pte(x)        native_make_pte(x)

#define arch_end_context_switch(prev)   do {} while(0)

#endif  /* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
        return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))
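
/*
 * Illustrative sketch (not in the original file): as noted above, these
 * predicates are only meaningful for a present PTE read under the page
 * table lock.  A hypothetical reference-bit scan might look like:
 *
 *      pte_t pte = *ptep;
 *
 *      if (pte_present(pte) && pte_young(pte))
 *              referenced = 1;   followed later by pte_mkold() + write-back
 */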

static inline int pmd_large(pmd_t pte)
{
        return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
        return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}
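
/*
 * Illustrative sketch (not in the original file): these helpers are pure
 * value transformations, so they compose; the result still has to be
 * written back with set_pte_at() or one of the ptep_* helpers.  For
 * example, write-protecting a PTE and clearing its dirty bit might look
 * roughly like:
 *
 *      pte_t pte = *ptep;
 *      set_pte_at(mm, addr, ptep, pte_wrprotect(pte_mkclean(pte)));
 *
 * Real callers typically clear the PTE first (ptep_get_and_clear()) to
 * avoid racing with hardware accessed/dirty updates.
 */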

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}
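
/*
 * Worked example (illustrative, not in the original file): on a CPU
 * without NX support, __supported_pte_mask has _PAGE_NX cleared, so a
 * *present* pgprot that asks for a no-exec mapping comes back with the NX
 * bit stripped; a non-present pgprot (e.g. a swap entry encoding) is
 * returned untouched so that its software bits survive.
 */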

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

        return __pte(val);
}
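
/*
 * Worked example (illustrative, not in the original file): _PAGE_CHG_MASK
 * keeps the PFN plus the accessed/dirty and caching-related bits of the
 * old PTE, and everything else is taken from newprot.  So after an
 * mprotect() from PROT_READ|PROT_WRITE to PROT_READ, something like
 *
 *      pte = pte_modify(pte, newprot);
 *
 * (with newprot derived from the new vma flags) yields a PTE with the
 * same PFN and dirty/accessed state but with _PAGE_RW cleared.
 */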

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmdval_t val = pmd_val(pmd);

        val &= _HPAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

        return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         unsigned long flags,
                                         unsigned long new_flags)
{
        /*
         * PAT type is always WB for untracked ranges, so no need to check.
         */
        if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
                return 1;

        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         */
        if ((flags == _PAGE_CACHE_UC_MINUS &&
             new_flags == _PAGE_CACHE_WB) ||
            (flags == _PAGE_CACHE_WC &&
             new_flags == _PAGE_CACHE_WB)) {
                return 0;
        }

        return 1;
}
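
/*
 * Worked example (illustrative, not in the original file): if a caller
 * requested UC- (flags == _PAGE_CACHE_UC_MINUS) for a tracked range but
 * the memtype tracker wants to hand back WB (new_flags == _PAGE_CACHE_WB),
 * the combination is rejected and 0 is returned.  A WB request that comes
 * back as UC- is allowed, since the caller can simply use the stricter
 * returned type.
 */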

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pte_hidden(pte_t pte)
{
        return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
        /* Only check low word on 32-bit platforms, since it might be
           out of sync with upper half. */
        return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)   pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
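
/*
 * Worked example (illustrative, not in the original file): with the usual
 * 64-bit/PAE values PMD_SHIFT == 21 and PTRS_PER_PMD == 512, each PMD
 * entry covers 2 MiB and pmd_index() extracts address bits 21-29, e.g.
 * pmd_index(0x00400000) == (0x00400000 >> 21) & 511 == 2.
 */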

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
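
/*
 * Worked example (illustrative, not in the original file): with 4 KiB
 * pages (PAGE_SHIFT == 12) and PTRS_PER_PTE == 512, pte_index() extracts
 * address bits 12-20, so pte_index(0x00403000) == 3, and
 * pte_offset_kernel(pmd, 0x00403000) points at the fourth pte_t on the
 * PTE page referenced by *pmd.
 */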

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}
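
/*
 * Worked example (illustrative, not in the original file): with 4 KiB
 * pages this is npg >> 8, i.e. 256 pages per megabyte, so
 * pages_to_mb(262144) == 1024 (1 GiB expressed in MB).
 */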

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)           pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif  /* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)           pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !native_pgd_val(pgd);
}
#endif  /* PAGETABLE_LEVELS > 3 */

#endif  /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
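
/*
 * Illustrative sketch (not in the original file) of how these helpers are
 * typically combined to walk the kernel page tables down to a PTE; a real
 * walker also has to handle large pages and take the appropriate locks:
 *
 *      pgd_t *pgd = pgd_offset_k(address);
 *      pud_t *pud;
 *      pmd_t *pmd;
 *      pte_t *pte;
 *
 *      if (pgd_none(*pgd) || pgd_bad(*pgd))
 *              return NULL;
 *      pud = pud_offset(pgd, address);
 *      if (pud_none(*pud) || pud_large(*pud))
 *              return NULL;
 *      pmd = pmd_offset(pud, address);
 *      if (pmd_none(*pmd) || pmd_large(*pmd))
 *              return NULL;
 *      pte = pte_offset_kernel(pmd, address);
 */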


#define KERNEL_PGD_BOUNDARY     pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
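
/*
 * Worked example (illustrative, not in the original file): assuming the
 * x86_64 layout of this period, with four-level paging (PGDIR_SHIFT == 39,
 * PTRS_PER_PGD == 512) and PAGE_OFFSET == 0xffff880000000000, we get
 * KERNEL_PGD_BOUNDARY == pgd_index(PAGE_OFFSET) == 272 and
 * KERNEL_PGD_PTRS == 240: the last 240 pgd slots hold the kernel mappings
 * shared by every mm.
 */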

#ifndef __ASSEMBLY__

extern int direct_gbpages;

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
        pmd_t res = *pmdp;

        native_pmd_clear(pmdp);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
                                     pmd_t *pmdp, pmd_t pmd)
{
        native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#endif
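
/*
 * Illustrative sketch (not in the original file): per the rules above, a
 * helper that clears a bit in a user PTE by hand is expected to notify a
 * shadowing hypervisor and to be followed by a TLB flush, roughly:
 *
 *      clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 *      pte_update(mm, addr, ptep);
 *      ... and, from the caller, flush_tlb_page(vma, addr);
 *
 * which is essentially the pattern ptep_set_wrprotect() below follows.
 */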

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pmd_t *pmdp)
{
        pmd_t pmd = native_pmdp_get_and_clear(pmdp);
        pmd_update(mm, addr, pmdp);
        return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
        pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}
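
/*
 * Illustrative usage (not in the original file): pgd constructor code can
 * use this to copy the kernel portion of an existing pgd into a freshly
 * allocated one, e.g.
 *
 *      clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *                      KERNEL_PGD_PTRS);
 *
 * where "new_pgd" is just a placeholder name for the pgd page being set up.
 */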


#include <asm-generic/pgtable.h>
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */
