/* arch/powerpc/include/asm/pgalloc-64.h */
#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

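/*
 * Book-keeping for the blocks backing the virtual memmap: each
 * allocation is chained onto vmemmap_list with its physical and
 * virtual address so it can be found again later (for example when
 * memory is hot-unplugged).
 */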
struct vmemmap_backing {
        struct vmemmap_backing *list;
        unsigned long phys;
        unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  PTE pages (which are linked to a struct page
 * for now) are drawn from the main get_free_pages() pool and use an
 * index_size of 0; every other table is allocated as
 * (2^index_size * sizeof(pointer)) bytes from the kmem_cache in
 * PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE  0xf

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({                             \
                        BUG_ON(!(shift));               \
                        pgtable_cache[(shift) - 1];     \
                })

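/*
 * Illustrative sketch only (pgtable_pack/pgtable_unpack_shift are
 * hypothetical helpers, not part of this interface): since every
 * table is aligned to at least MAX_PGTABLE_INDEX_SIZE + 1 bytes, the
 * index_size can travel in the low bits of the table pointer and be
 * recovered with the mask, which is what pgtable_free_tlb() and
 * __tlb_remove_table() below rely on.
 *
 *	static inline void *pgtable_pack(void *table, unsigned shift)
 *	{
 *		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
 *		return (void *)((unsigned long)table | shift);
 *	}
 *
 *	static inline unsigned pgtable_unpack_shift(void *packed)
 *	{
 *		return (unsigned long)packed & MAX_PGTABLE_INDEX_SIZE;
 *	}
 */
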
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

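/*
 * The PTE-level helpers differ with the base page size: with 4K pages
 * a PTE page is a full page with a struct page behind it, while with
 * 64K pages it is handed out by a dedicated allocator (see the #else
 * branch below).
 */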
#ifndef CONFIG_PPC_64K_PAGES

#define pgd_populate(MM, PGD, PUD)      pgd_set(PGD, __pgtable_ptr_val(PUD))

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
                                GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        pud_set(pud, __pgtable_ptr_val(pmd));
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                       pte_t *pte)
{
        pmd_set(pmd, __pgtable_ptr_val(pte));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t pte_page)
{
        pmd_set(pmd, __pgtable_ptr_val(page_address(pte_page)));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                      unsigned long address)
{
        struct page *page;
        pte_t *pte;

        pte = pte_alloc_one_kernel(mm, address);
        if (!pte)
                return NULL;
        page = virt_to_page(pte);
        /* The ctor can fail (split ptlock allocation), so back out. */
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        return page;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
        pgtable_page_dtor(ptepage);
        __free_page(ptepage);
}

static inline void pgtable_free(void *table, unsigned index_size)
{
        /*
         * index_size 0 means a PTE page from the page allocator;
         * anything else came from its kmem_cache.
         */
        if (!index_size)
                free_page((unsigned long)table);
        else {
                BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(index_size), table);
        }
}

#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
                                    void *table, int shift)
{
        unsigned long pgf = (unsigned long)table;
        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        pgf |= shift;
        tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        pgtable_free(table, shift);
}
#else /* !CONFIG_SMP */
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
                                    void *table, int shift)
{
        pgtable_free(table, shift);
}
#endif /* CONFIG_SMP */
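
/*
 * On SMP another CPU may still be doing a lockless walk of a table
 * that is being torn down, so tlb_remove_table() defers the actual
 * free until that can no longer happen; on UP there is no concurrent
 * walker and the table is freed immediately.
 */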

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
{
        tlb_flush_pgtable(tlb, address);
        pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
}

#else /* CONFIG_PPC_64K_PAGES */

extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
extern void page_table_free(struct mm_struct *, unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif

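/*
 * Here pgtable_t is a kernel virtual address (a pte_t *) rather than
 * a struct page pointer, which is why the helpers below cast it.  The
 * final argument of page_table_alloc()/page_table_free() appears to
 * select a kernel (1) or user (0) table, judging by the call sites
 * below.
 */
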
#ifndef __PAGETABLE_PUD_FOLDED
/* book3s 64 uses a 4-level page table */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
        pgd_set(pgd, __pgtable_ptr_val(pud));
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
                                GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}
#endif /* __PAGETABLE_PUD_FOLDED */

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        pud_set(pud, __pgtable_ptr_val(pmd));
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                       pte_t *pte)
{
        pmd_set(pmd, __pgtable_ptr_val(pte));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t pte_page)
{
        pmd_set(pmd, __pgtable_ptr_val(pte_page));
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
        return (pgtable_t)pmd_page_vaddr(pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        return (pte_t *)page_table_alloc(mm, address, 1);	/* kernel table */
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                      unsigned long address)
{
        return (pgtable_t)page_table_alloc(mm, address, 0);	/* user table */
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        page_table_free(mm, (unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
        page_table_free(mm, (unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
{
        tlb_flush_pgtable(tlb, address);
        pgtable_free_tlb(tlb, table, 0);
}
#endif /* CONFIG_PPC_64K_PAGES */

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
                                GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

#define __pmd_free_tlb(tlb, pmd, addr)                \
        pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#ifndef __PAGETABLE_PUD_FOLDED
#define __pud_free_tlb(tlb, pud, addr)                \
        pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
#endif /* __PAGETABLE_PUD_FOLDED */

/* Nothing to reap here: the page table kmem_caches manage themselves. */
#define check_pgt_cache()       do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */