linux/arch/x86/include/asm/pgtable.h
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)                                  \
        ((boot_cpu_data.x86 > 3)                                \
         ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))  \
         : (prot))
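
/*
 * Illustrative usage sketch (not part of the original header): a driver
 * that wants an uncached user mapping typically applies this helper to
 * vma->vm_page_prot in its ->mmap() method before remapping, e.g.
 *
 *     vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *     io_remap_pfn_range(vma, vma->vm_start, pfn,
 *                        vma->vm_end - vma->vm_start, vma->vm_page_prot);
 *
 * The boot_cpu_data.x86 > 3 check leaves the protection untouched on
 * 386-class CPUs, which predate the PCD/PWT cache-control page bits.
 */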

#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)                                       \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)              native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)                  native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)             native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)                  native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)                  native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#define pmd_update(mm, addr, ptep)              do { } while (0)
#define pmd_update_defer(mm, addr, ptep)        do { } while (0)

#define pgd_val(x)      native_pgd_val(x)
#define __pgd(x)        native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)      native_pud_val(x)
#define __pud(x)        native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)      native_pmd_val(x)
#define __pmd(x)        native_make_pmd(x)
#endif

#define pte_val(x)      native_pte_val(x)
#define __pte(x)        native_make_pte(x)

#define arch_end_context_switch(prev)   do {} while(0)

#endif  /* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
        return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
        return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
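
/*
 * Worked example (illustrative, 64-bit with 4 KiB pages): for a pte whose
 * raw value is 0x8000000012345067 (NX in bit 63, pfn 0x12345, and
 * PRESENT/RW/USER/ACCESSED/DIRTY in the low bits), PTE_PFN_MASK strips
 * both the low flag bits and the NX bit, so
 *
 *     pte_pfn(pte) == 0x12345000 >> PAGE_SHIFT == 0x12345
 *
 * and pte_page() below is simply pfn_to_page() of that number.
 */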

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
        return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
        return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}
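
/*
 * Example of the rule above (illustrative): on hardware without NX,
 * __supported_pte_mask has _PAGE_NX cleared, so massage_pgprot() silently
 * drops the NX bit from a present protection such as PAGE_KERNEL.  A
 * non-present value (a swap entry, for instance) is passed through
 * untouched, because its bits are a software encoding rather than
 * hardware flags.
 */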

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

        return __pte(val);
}
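
/*
 * Worked example (illustrative): mprotect() making a writable anonymous
 * page read-only ends up here with newprot == PAGE_READONLY.  The pfn,
 * _PAGE_ACCESSED and _PAGE_DIRTY sit inside _PAGE_CHG_MASK and survive;
 * _PAGE_RW is outside the mask and can only come from newprot, so it is
 * dropped; _PAGE_NX is likewise taken from the new protection.
 */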

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmdval_t val = pmd_val(pmd);

        val &= _HPAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

        return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         unsigned long flags,
                                         unsigned long new_flags)
{
        /*
         * PAT type is always WB for untracked ranges, so no need to check.
         */
        if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
                return 1;

        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         */
        if ((flags == _PAGE_CACHE_UC_MINUS &&
             new_flags == _PAGE_CACHE_WB) ||
            (flags == _PAGE_CACHE_WC &&
             new_flags == _PAGE_CACHE_WB)) {
                return 0;
        }

        return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
                               _PAGE_NUMA);
}

#define pte_accessible pte_accessible
static inline int pte_accessible(pte_t a)
{
        return pte_flags(a) & _PAGE_PRESENT;
}

static inline int pte_hidden(pte_t pte)
{
        return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
        /*
         * Checking for _PAGE_PSE is needed too because
         * split_huge_page will temporarily clear the present bit (but
         * the _PAGE_PSE flag will remain set at all times while the
         * _PAGE_PRESENT bit is clear).
         */
        return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
                                 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
        /* Only check low word on 32-bit platforms, since it might be
           out of sync with upper half. */
        return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)   pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
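
/*
 * Worked example (illustrative, assuming the x86-64 defaults PMD_SHIFT == 21
 * and PTRS_PER_PMD == 512): for the user address 0x7f1234567000,
 *
 *     pmd_index(0x7f1234567000) == (0x7f1234567000 >> 21) & 511 == 0x1a2
 *
 * i.e. slot 0x1a2 of the pmd page covers the 2 MiB region holding that
 * address.
 */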

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
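
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12 and
 * PTRS_PER_PTE == 512): continuing the address used above,
 *
 *     pte_index(0x7f1234567000) == (0x7f1234567000 >> 12) & 511 == 0x167
 *
 * so the pte controlling that page sits at slot 0x167 of its pte page.
 */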

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
        /* pmd_numa check */
        if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
                return 0;
#endif
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}
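
/*
 * Worked example (illustrative, 4 KiB pages): 20 - PAGE_SHIFT == 8, so this
 * is a divide by 256 pages per megabyte, e.g. 262144 pages >> 8 == 1024 MB.
 */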

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)           pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif  /* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)           pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !native_pgd_val(pgd);
}
#endif  /* PAGETABLE_LEVELS > 3 */

#endif  /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
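
/*
 * Illustrative sketch (not from the original header): a software walk of
 * the kernel page tables for a mapped kernel address combines the helpers
 * above, roughly
 *
 *     pgd_t *pgd = pgd_offset_k(addr);
 *     pud_t *pud = pud_offset(pgd, addr);
 *     pmd_t *pmd = pmd_offset(pud, addr);
 *     pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with pgd_none()/pud_none()/pmd_none() checked at each level before
 * descending, and pud_large()/pmd_large() checked to stop early at 1 GiB
 * or 2 MiB mappings.
 */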


#define KERNEL_PGD_BOUNDARY     pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
        pmd_t res = *pmdp;

        native_pmd_clear(pmdp);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
                                     pmd_t *pmdp, pmd_t pmd)
{
        native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#endif
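
/*
 * Illustrative sketch of the rule above (not from the original header):
 * code that modifies a user pte in place, e.g. by clearing a bit directly,
 * is expected to notify the hypervisor afterwards:
 *
 *     clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 *     pte_update(mm, addr, ptep);
 *
 * which is exactly the pattern ptep_set_wrprotect() below follows; the
 * call is a no-op natively and a resynchronization hook for shadow-MMU
 * hypervisors.
 */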

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pmd_t *pmdp)
{
        pmd_t pmd = native_pmdp_get_and_clear(pmdp);
        pmd_update(mm, addr, pmdp);
        return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
        pmd_update(mm, addr, pmdp);
}

 778 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 779 *
 780 *  dst - pointer to pgd range anwhere on a pgd page
 781 *  src - ""
 782 *  count - the number of pgds to copy.
 783 *
 784 * dst and src can be on the same page, but the range must not overlap,
 785 * and must not cross a page boundary.
 786 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}
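
/*
 * Usage sketch (illustrative, not from the original header): pgd setup code
 * typically uses this together with KERNEL_PGD_BOUNDARY/KERNEL_PGD_PTRS to
 * copy the kernel portion of the reference page tables into a fresh pgd,
 * along the lines of
 *
 *     clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *                     swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *                     KERNEL_PGD_PTRS);
 */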

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
        return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
        return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
        return ~(page_level_size(level) - 1);
}
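
/*
 * Worked example (illustrative, 4 KiB pages so PTE_SHIFT == 9, and assuming
 * the enum pg_level values PG_LEVEL_4K == 1 and PG_LEVEL_2M == 2 used
 * elsewhere in the x86 code):
 *
 *     page_level_shift(PG_LEVEL_2M) == (12 - 9) + 2 * 9 == 21
 *     page_level_size(PG_LEVEL_2M)  == 1UL << 21        == 2 MiB
 *     page_level_mask(PG_LEVEL_2M)  == ~(2 MiB - 1)
 */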

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
                unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmd)
{
}

#include <asm-generic/pgtable.h>
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */