linux/arch/powerpc/include/asm/pgalloc-32.h
#ifndef _ASM_POWERPC_PGALLOC_32_H
#define _ASM_POWERPC_PGALLOC_32_H

#include <linux/threads.h>

/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
#define MAX_PGTABLE_INDEX_SIZE  0

extern void __bad_pte(pmd_t *pmd);

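/*
 * pgd_alloc()/pgd_free() are implemented out of line in the 32-bit mm
 * code; as the comment above notes, the tables come straight from the
 * page allocator.
 */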
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present.
 */
/* #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); }) */
#define pmd_free(mm, x)                 do { } while (0)
#define __pmd_free_tlb(tlb,x,a)         do { } while (0)
/* #define pgd_populate(mm, pmd, pte)      BUG() */

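/*
 * Classic (non-Book-E) 32-bit stores the physical address of the pte
 * page in the pmd entry; Book-E stores its kernel virtual address.
 */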
#ifndef CONFIG_BOOKE
#define pmd_populate_kernel(mm, pmd, pte)       \
                (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte)      \
                (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
#define pmd_pgtable(pmd) pmd_page(pmd)
#else
#define pmd_populate_kernel(mm, pmd, pte)       \
                (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte)      \
                (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif

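/*
 * pte_alloc_one_kernel() hands back a bare pte_t * for kernel page
 * tables; pte_alloc_one() hands back a pgtable_t (struct page *) for
 * user page tables, whose constructor state is undone again by
 * pte_free()/__pte_free_tlb() below.
 */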
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
        pgtable_page_dtor(ptepage);
        __free_page(ptepage);
}

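/*
 * Every page-table level is a single page here (MAX_PGTABLE_INDEX_SIZE
 * is 0), so callers must always pass index_size == 0.
 */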
static inline void pgtable_free(void *table, unsigned index_size)
{
        BUG_ON(index_size); /* 32-bit doesn't use this */
        free_page((unsigned long)table);
}

#define check_pgt_cache()       do { } while (0)

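/*
 * On SMP the page cannot be freed immediately: another CPU may still be
 * walking the page tables, so hand it to tlb_remove_table() for deferred
 * freeing.  The index_size (always 0 here) is tucked into the low bits
 * of the pointer so __tlb_remove_table() can recover it later.
 */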
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
                                    void *table, int shift)
{
        unsigned long pgf = (unsigned long)table;
        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        pgf |= shift;
        tlb_remove_table(tlb, (void *)pgf);
}

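/*
 * Undo the encoding above: mask the index_size back out of the pointer
 * and free the table page.  With MAX_PGTABLE_INDEX_SIZE == 0 both the
 * mask and the recovered shift are trivially zero on 32-bit.
 */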
static inline void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        pgtable_free(table, shift);
}
#else
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
                                    void *table, int shift)
{
        pgtable_free(table, shift);
}
#endif

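/*
 * Tear down a user pte page under the mmu_gather: flush anything the MMU
 * may still have cached for the page tables at this address, drop the
 * constructor state, then queue the page's virtual address for (possibly
 * deferred) freeing.
 */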
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
{
        tlb_flush_pgtable(tlb, address);
        pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */