/*
 * linux/arch/x86/include/asm/pgalloc.h
 *
 * x86 page-table page allocation/free helpers.
 */
   1#ifndef _ASM_X86_PGALLOC_H
   2#define _ASM_X86_PGALLOC_H
   3
   4#include <linux/threads.h>
   5#include <linux/mm.h>           /* for struct page */
   6#include <linux/pagemap.h>
   7
/* Default (non-paravirt) pgd allocation hook: nothing to do, report success. */
static inline int  __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
   9
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
/*
 * Non-paravirt build: the page-table alloc/release notification hooks
 * collapse to no-ops so the helpers below can call them unconditionally.
 * The pfn argument identifies the page-table page being handed to or
 * taken back from the hypervisor in a paravirt build.
 */
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
#endif
  24
  25/*
  26 * Allocate and free page tables.
  27 */
  28extern pgd_t *pgd_alloc(struct mm_struct *);
  29extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  30
  31extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
  32extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
  33
  34/* Should really implement gc for free page table pages. This could be
  35   done with a reference count in struct page. */
  36
  37static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  38{
  39        BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
  40        free_page((unsigned long)pte);
  41}
  42
/*
 * Free a user pte page.  The pgtable_page_dtor() call undoes the
 * constructor work done when the page was set up as a page table
 * (see pte_alloc_one()) before the page goes back to the allocator.
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
        pgtable_page_dtor(pte);
        __free_page(pte);
}
  48
  49extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
  50
/*
 * Hand a user pte page to the mmu_gather for freeing.  The address
 * argument is unused here; it exists to match the generic
 * __pte_free_tlb() interface.
 */
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
                                  unsigned long address)
{
        ___pte_free_tlb(tlb, pte);
}
  56
  57static inline void pmd_populate_kernel(struct mm_struct *mm,
  58                                       pmd_t *pmd, pte_t *pte)
  59{
  60        paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
  61        set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
  62}
  63
/*
 * Point a pmd entry at a user pte page.  Works from the pfn rather
 * than a kernel virtual address (note: no __pa() here — presumably
 * because the pte page may be highmem with no direct mapping).
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                struct page *pte)
{
        unsigned long pfn = page_to_pfn(pte);

        /* Paravirt notification first (no-op when !CONFIG_PARAVIRT). */
        paravirt_alloc_pte(mm, pfn);
        set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
  72
  73#define pmd_pgtable(pmd) pmd_page(pmd)
  74
  75#if PAGETABLE_LEVELS > 2
  76static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
  77{
  78        return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
  79}
  80
/*
 * Free a pmd page obtained from pmd_alloc_one().  The pointer must
 * be page aligned; anything else indicates corruption.
 */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
        free_page((unsigned long)pmd);
}
  86
  87extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
  88
  89static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
  90                                  unsigned long adddress)
  91{
  92        ___pmd_free_tlb(tlb, pmd);
  93}
  94
  95#ifdef CONFIG_X86_PAE
  96extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
  97#else   /* !CONFIG_X86_PAE */
/*
 * !PAE case: point a pud entry at a new pmd page, after notifying
 * the paravirt layer (no-op when !CONFIG_PARAVIRT).
 */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
        set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
 103#endif  /* CONFIG_X86_PAE */
 104
 105#if PAGETABLE_LEVELS > 3
 106static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 107{
 108        paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
 109        set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
 110}
 111
 112static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 113{
 114        return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 115}
 116
/*
 * Free a pud page obtained from pud_alloc_one().  The pointer must
 * be page aligned; anything else indicates corruption.
 */
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
        free_page((unsigned long)pud);
}
 122
 123extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
 124
/*
 * Hand a pud page to the mmu_gather for freeing.  The address
 * argument is unused here; it exists to match the generic
 * __pud_free_tlb() interface.
 */
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                  unsigned long address)
{
        ___pud_free_tlb(tlb, pud);
}
 130
 131#endif  /* PAGETABLE_LEVELS > 3 */
 132#endif  /* PAGETABLE_LEVELS > 2 */
 133
 134#endif /* _ASM_X86_PGALLOC_H */
 135