linux/arch/powerpc/include/asm/book3s/64/pgalloc.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>

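/*
 * Each chunk of memory that backs part of the virtual memmap is recorded
 * in a vmemmap_backing entry and threaded onto vmemmap_list, so the
 * backing pages can be located again later (e.g. on memory hot-remove).
 */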
struct vmemmap_backing {
        struct vmemmap_backing *list;
        unsigned long phys;
        unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);
void pte_frag_destroy(void *pte_frag);

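/*
 * The radix PGD is larger than a 4K page: with a 64K base page size a
 * single page is enough, but with 4K pages we need an order-4 (16 page,
 * 64K) allocation. The hash PGD in pgd_alloc() below comes from a kmem
 * cache instead.
 */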
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
        return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
        struct page *page;
        page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
                                4);
        if (!page)
                return NULL;
        return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
        free_page((unsigned long)pgd);
#else
        free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;

        if (radix_enabled())
                return radix__pgd_alloc(mm);

        pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
                               pgtable_gfp_flags(mm, GFP_KERNEL));
        if (unlikely(!pgd))
                return pgd;

        /*
         * Don't scan the PGD for pointers: it contains references to PUDs,
         * but those references are not full pointers and so can't be
         * recognised by kmemleak.
         */
        kmemleak_no_scan(pgd);

        /*
         * With hugetlb, we don't clear the second half of the page table.
         * If we share the same slab cache with the PMD or PUD level table,
         * we need to make sure we zero out the full table on alloc.
         * With 4K pages we don't store a slot in the second half, hence we
         * don't need to do this for 4K.
         */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
        (H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
        memset(pgd, 0, PGD_TABLE_SIZE);
#endif
        return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        if (radix_enabled())
                return radix__pgd_free(mm, pgd);
        kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

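/*
 * The p4d level is folded on book3s64, so p4d_populate() effectively
 * writes the PGD entry that points at the newly allocated PUD table.
 */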
static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
{
        *pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        pud_t *pud;

        pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
                               pgtable_gfp_flags(mm, GFP_KERNEL));
        /*
         * Tell kmemleak to ignore the PUD: don't scan it for pointers and
         * don't consider it a leak. PUDs are typically only referred to by
         * their PGD, but kmemleak is not able to recognise those references
         * as pointers, leading to false leak reports.
         */
        kmemleak_ignore(pud);

        return pud;
}

static inline void __pud_free(pud_t *pud)
{
        struct page *page = virt_to_page(pud);

        /*
         * Early PUD pages allocated via the memblock allocator
         * can't be freed back to the slab cache directly.
         */
        if (PageReserved(page))
                free_reserved_page(page);
        else
                kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        return __pud_free(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        *pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

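/*
 * Page tables being torn down may still be walked locklessly by other
 * CPUs, so the *_free_tlb() variants go through pgtable_free_tlb(),
 * which defers the actual free until the TLB has been flushed (see
 * __tlb_remove_table()).
 */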
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                  unsigned long address)
{
        pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

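/*
 * PMD tables are handed out as fragments of a shared page by
 * pmd_fragment_alloc(), so several PMD tables can be packed into one
 * backing page instead of each consuming a full page.
 */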
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                  unsigned long address)
{
        return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                       pte_t *pte)
{
        *pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t pte_page)
{
        *pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
{
        pgtable_free_tlb(tlb, table, PTE_INDEX);
}

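/*
 * Per page-size counters of pages mapped in the kernel linear mapping;
 * only maintained when CONFIG_PROC_FS is enabled, where they are used
 * for /proc reporting of the mapping page sizes.
 */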
extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
static inline void update_page_count(int psize, long count)
{
        if (IS_ENABLED(CONFIG_PROC_FS))
                atomic_long_add(count, &direct_pages_count[psize]);
}

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */