linux/arch/powerpc/include/asm/book3s/64/pgalloc.h
#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

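/*
 * Each vmemmap_backing node records one allocation backing part of the
 * virtual memmap (the physical backing and the virtual address it
 * serves); vmemmap_list chains the nodes so the vmemmap code in
 * arch/powerpc/mm can find them again. (Summary added for orientation;
 * the list itself is maintained outside this header.)
 */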
struct vmemmap_backing {
        struct vmemmap_backing *list;
        unsigned long phys;
        unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  PTE pages are the exception: they are linked to
 * a struct page for now and drawn from the main get_free_pages()
 * pool.  For the other levels, the allocation size is
 * (2^index_size * sizeof(pointer)) and allocations are drawn from the
 * kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE  0xf

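/*
 * Illustrative sketch (not part of the original header) of how the
 * mask is used by the pgtable_free_tlb()/__tlb_remove_table() pair
 * declared below: the index size rides in the low bits of the table
 * pointer, and MAX_PGTABLE_INDEX_SIZE recovers it later.
 *
 *      unsigned long pgf = (unsigned long)table | index_size;
 *      ...
 *      unsigned int shift = pgf & MAX_PGTABLE_INDEX_SIZE;
 *      void *ptr = (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
 */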
extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({                             \
                        BUG_ON(!(shift));               \
                        pgtable_cache[(shift) - 1];     \
                })

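/*
 * Notes on the declarations below (added summary; the authoritative
 * definitions live in arch/powerpc/mm): pte_fragment_alloc() and
 * pte_fragment_free() carve PTE tables out of shared pages instead of
 * spending a full page per table; the final int selects the kernel (1)
 * or user (0) variant.  pgtable_free_tlb() defers the real free to the
 * mmu_gather TLB flush, so concurrent lockless page-table walkers
 * cannot see a table disappear under them.
 */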
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif

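/*
 * Sizing note (added summary, assuming the usual RADIX_PGD_INDEX_SIZE
 * of 13): the radix PGD is 64K, so with 64K pages it fits in a single
 * page, while with 4K pages it takes the order-4 allocation below.
 */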
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
        return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
        struct page *page;
        page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
                                4);
        if (!page)
                return NULL;
        return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
        free_page((unsigned long)pgd);
#else
        free_pages((unsigned long)pgd, 4);
#endif
}

/* Hash and radix size their PGDs differently, so pick the path at runtime. */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__pgd_alloc(mm);
        return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
                pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        if (radix_enabled())
                return radix__pgd_free(mm, pgd);
        kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

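/*
 * The *_populate() helpers below wire a freshly allocated lower-level
 * table into an upper-level entry: __pgtable_ptr_val() converts the
 * kernel pointer into the form the MMU expects, and the *_VAL_BITS
 * carry the flags a valid pointer entry needs at that level.
 * (Summary added for orientation.)
 */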
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
        pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
                pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                  unsigned long address)
{
        /*
         * By now all the pud entries should be none entries. So go
         * ahead and flush the page walk cache
         */
        flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
                pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                  unsigned long address)
{
        /*
         * By now all the pmd entries should be none entries. So go
         * ahead and flush the page walk cache
         */
        flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                       pte_t *pte)
{
        pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t pte_page)
{
        pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

/* Recover the pgtable_t (here a pte_t *) that pmd_populate() installed. */
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
        return (pgtable_t)pmd_page_vaddr(pmd);
}

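/*
 * Two PTE-table allocation schemes follow: with 4K pages, each table
 * gets a page of its own; with 64K pages, a whole page per table would
 * waste most of it, so tables are carved out of shared pages via the
 * pte_fragment_* helpers declared above. (Summary added for
 * orientation.)
 */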
#ifdef CONFIG_PPC_4K_PAGES
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                      unsigned long address)
{
        struct page *page;
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
        if (!pte)
                return NULL;
        page = virt_to_page(pte);
        /* pgtable_page_ctor() sets the page up for split PTE locking */
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        return pte;
}
#else /* if CONFIG_PPC_64K_PAGES */

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                      unsigned long address)
{
        return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}
#endif
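
/*
 * Usage sketch (illustrative only, not taken from this file; the real
 * callers live in generic mm code): a user PTE table is typically
 * allocated, published, and released on failure like this:
 *
 *      pgtable_t new = pte_alloc_one(mm, addr);
 *      if (!new)
 *              return -ENOMEM;
 *      ...
 *      pmd_populate(mm, pmd, new);     // publish the filled table
 *      ...
 *      pte_free(mm, new);              // only if it was never published
 */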

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
        pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
{
        /*
         * By now all the pte entries should be none entries. So go
         * ahead and flush the page walk cache
         */
        flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, table, 0);
}

#define check_pgt_cache()       do { } while (0)

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */