linux/arch/powerpc/include/asm/book3s/64/hugetlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
/*
 * For radix we want generic code to handle hugetlb. But if we want
 * both hash and radix to be enabled together, we need to work around
 * the limitations.
 */
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				 unsigned long len, unsigned long pgoff,
				 unsigned long flags);

extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte);

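/*
 * Map a huge page shift back to the MMU page size index used by the rest
 * of the MMU code. Only the sizes hugetlb can use on book3s64 are handled
 * here: 2M/1G (radix) and 16M/16G (hash).
 */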
static inline int hstate_get_psize(struct hstate *hstate)
{
	unsigned long shift;

	shift = huge_page_shift(hstate);
	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return MMU_PAGE_2M;
	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
		return MMU_PAGE_1G;
	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
		return MMU_PAGE_16M;
	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
		return MMU_PAGE_16G;
	else {
		WARN(1, "Wrong huge page shift\n");
		return mmu_virtual_psize;
	}
}

#define __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
static inline bool gigantic_page_runtime_supported(void)
{
	/*
	 * We use gigantic page reservation with hypervisor assist in some
	 * cases, i.e. on hash translation mode LPARs. We cannot use runtime
	 * allocation of gigantic pages on those platforms.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return false;

	return true;
}

/* hugepd entry valid bit */
#define HUGEPD_VAL_BITS		(0x8000000000000000UL)

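/*
 * Defining huge_ptep_modify_prot_start/commit to their own names makes the
 * generic hugetlb code use these out-of-line helpers instead of its default
 * implementations when changing protection on huge PTEs.
 */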
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep);

#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t new_pte);
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

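/*
 * The MMU page size index of the huge pages under this hugepd is kept in
 * bits 2-5 of the entry (see hugepd_populate() below, which shifts the
 * index left by 2 when writing the entry).
 */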
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
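
/*
 * Radix needs an explicit TLB flush for the huge page mapping; with hash
 * translation the flush is handled when the HPTEs are invalidated, so
 * there is nothing to do here.
 */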
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

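/*
 * Locate the huge PTE for @addr inside the hugepd page: take the offset of
 * @addr within the pdshift-sized region and divide it by the huge page
 * size encoded in the hugepd entry.
 */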
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned int pdshift)
{
	unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);

	return hugepd_page(hpd) + idx;
}

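/*
 * A hugepd entry packs together the physical address of the huge PTE page,
 * the valid bit (HUGEPD_VAL_BITS) and the MMU page size index shifted left
 * by 2.
 */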
static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
{
	*hpdp = __hugepd(__pa(new) | HUGEPD_VAL_BITS | (shift_to_mmu_psize(pshift) << 2));
}

void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

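/*
 * Translate a page shift reported by firmware into an MMU page size index,
 * rejecting shifts beyond SLICE_HIGH_SHIFT and page sizes the current
 * translation mode cannot back with hugetlb.
 */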
static inline int check_and_get_huge_psize(int shift)
{
	int mmu_psize;

	if (shift > SLICE_HIGH_SHIFT)
		return -EINVAL;

	mmu_psize = shift_to_mmu_psize(shift);

	/*
	 * We need to make sure that for the different page sizes reported by
	 * firmware we only add hugetlb support for the page sizes that can be
	 * supported by the Linux page table layout.
	 * For now we have:
	 * Radix: 2M and 1G
	 * Hash: 16M and 16G
	 */
	if (radix_enabled()) {
		if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
			return -EINVAL;
	} else {
		if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
			return -EINVAL;
	}
	return mmu_psize;
}

#endif