linux/arch/powerpc/include/asm/book3s/64/pgalloc.h
#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>

struct vmemmap_backing {
        struct vmemmap_backing *list;
        unsigned long phys;
        unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.  A sketch of this low-bit encoding
 * follows the define below.
 */
#define MAX_PGTABLE_INDEX_SIZE  0xf
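
/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): because every page table is aligned to at least
 * MAX_PGTABLE_INDEX_SIZE + 1 bytes, an index size up to 0xf can ride
 * in the low bits of a table pointer and be recovered with the same
 * value used as a mask.  The helper names here are hypothetical; in
 * the real code this encode/decode is done by pgtable_free_tlb() and
 * __tlb_remove_table().
 */
#if 0
static inline void *pgtable_encode_shift(void *table, int shift)
{
        /* shift <= MAX_PGTABLE_INDEX_SIZE fits in the low zero bits */
        return (void *)((unsigned long)table | shift);
}

static inline int pgtable_decode_shift(void *encoded)
{
        /* "one less than a power of two" makes the value usable as a mask */
        return (int)((unsigned long)encoded & MAX_PGTABLE_INDEX_SIZE);
}
#endif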

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({                             \
                        BUG_ON(!(shift));               \
                        pgtable_cache[(shift) - 1];     \
                })

extern pte_t *pte_fragment_alloc(struct mm_struct *, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);

static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
        return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
        struct page *page;
        page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
                           4);
        if (!page)
                return NULL;
        return (pgd_t *)page_address(page);
#endif
}
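
/*
 * Sizing note (editorial addition): with CONFIG_PPC_64K_PAGES a single
 * 64K page backs the radix PGD, while the order-4 allocation above
 * yields 2^4 * 4K = 64K with 4K pages, so both configurations
 * apparently end up with the same 64KB table.  Any tie-in to
 * RADIX_PGD_INDEX_SIZE is an assumption of this note, not something
 * the header states.
 */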

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
        free_page((unsigned long)pgd);
#else
        free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;

        if (radix_enabled())
                return radix__pgd_alloc(mm);

        pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
                               pgtable_gfp_flags(mm, GFP_KERNEL));
        if (unlikely(!pgd))
                return pgd;

        /*
         * Don't scan the PGD for pointers, it contains references to PUDs
         * but those references are not full pointers and so can't be
         * recognised by kmemleak.
         */
        kmemleak_no_scan(pgd);

        /*
         * With hugetlb, we don't clear the second half of the page table.
         * If we share the same slab cache with the pmd or pud level table,
         * we need to make sure we zero out the full table on alloc.
         * With 4K we don't store the slot in the second half, hence we
         * don't need to do this for 4K.
         */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
        (H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
        memset(pgd, 0, PGD_TABLE_SIZE);
#endif
        return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        if (radix_enabled())
                return radix__pgd_free(mm, pgd);
        kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
        pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}
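
/*
 * Editorial note (not in the original header): pgd_populate() above and
 * pud_populate()/pmd_populate() below share one pattern -- encode the
 * next-level table's address with __pgtable_ptr_val() (presumably a
 * conversion to the physical-address form stored in the entry; an
 * assumption of this note) and OR in the level's validity bits.
 */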

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        pud_t *pud;

        pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
                               pgtable_gfp_flags(mm, GFP_KERNEL));
        /*
         * Tell kmemleak to ignore the PUD; that means don't scan it for
         * pointers and don't consider it a leak.  PUDs are typically only
         * referred to by their PGD, but kmemleak is not able to recognise
         * those as pointers, leading to false leak reports.
         */
        kmemleak_ignore(pud);

        return pud;
}

static inline void __pud_free(pud_t *pud)
{
        struct page *page = virt_to_page(pud);

        /*
         * Early pud pages allocated via the memblock allocator
         * can't be freed directly to the slab cache.
         */
        if (PageReserved(page))
                free_reserved_page(page);
        else
                kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        __pud_free(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                  unsigned long address)
{
        /*
         * By now all the pud entries should be none entries. So go
         * ahead and flush the page walk cache.
         */
        flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                  unsigned long address)
{
        /*
         * By now all the pmd entries should be none entries. So go
         * ahead and flush the page walk cache.
         */
        flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                       pte_t *pte)
{
        pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t pte_page)
{
        pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
        return (pgtable_t)pmd_page_vaddr(pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
        return (pte_t *)pte_fragment_alloc(mm, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
        return (pgtable_t)pte_fragment_alloc(mm, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
        pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
{
        /*
         * By now all the pte entries should be none entries. So go
         * ahead and flush the page walk cache.
         */
        flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, table, PTE_INDEX);
}
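
/*
 * Illustrative use (editorial addition, not original code): roughly how
 * generic mm code pairs these helpers when it needs a new PTE page --
 * allocate with pte_alloc_one(), publish it with pmd_populate(), and on
 * teardown let __pte_free_tlb() defer the free until after the TLB and
 * page-walk-cache flush.  Locking and the check for a racing allocator
 * are omitted; the function name is hypothetical.
 */
#if 0
static inline int example_install_pte_page(struct mm_struct *mm, pmd_t *pmd)
{
        pgtable_t new_pte = pte_alloc_one(mm);

        if (!new_pte)
                return -ENOMEM;
        /* hook the new PTE page into the PMD entry */
        pmd_populate(mm, pmd, new_pte);
        return 0;
}
#endif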

#define check_pgt_cache()       do { } while (0)

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */