linux/arch/powerpc/include/asm/hugetlb.h
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
        /*
         * We have only four bits to encode the MMU page size.
         */
        BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
        return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
        return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
        return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
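
/*
 * Worked example (illustrative; the exact HUGEPD_SHIFT_MASK layout and
 * any low flag bits checked by hugepd_ok() are assumptions, not taken
 * from this header): a Book3S 64 directory entry is built roughly as
 *
 *      hpd.pd = (unsigned long)hugepte_table | (psize << 2);
 *
 * hugepd_mmu_psize() then recovers the four-bit psize index from bits
 * 2 and up of the masked value, and hugepd_page() clears the whole
 * HUGEPD_SHIFT_MASK region to get the hugepte table pointer back.
 */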

#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
        return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
        return hpd.pd & HUGEPD_SHIFT_MASK;
}

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
                                    unsigned pdshift)
{
        /*
         * On FSL BookE, we have multiple higher-level table entries that
         * point to the same hugepte.  Just use the first one since they're
         * all identical.  So for that case, idx = 0.
         */
        unsigned long idx = 0;

        pte_t *dir = hugepd_page(*hpdp);
#ifndef CONFIG_PPC_FSL_BOOK3E
        idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

        return dir + idx;
}
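
/*
 * Worked example (illustrative numbers, not from this file): with a
 * 64K huge page (hugepd_shift() == 16) inside a directory covering
 * 16M (pdshift == 24), addr == 0x12345678 gives
 *
 *      idx = (0x12345678 & ((1UL << 24) - 1)) >> 16 == 0x34,
 *
 * i.e. one of the 1 << (24 - 16) == 256 hugeptes in this table.
 */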

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
                                 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len)
{
        return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
                            pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                            unsigned long end, unsigned long floor,
                            unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that
 * hugepage-size-aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        return 0;
}
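
/*
 * Worked example (illustrative): for a 16M huge page,
 * huge_page_mask(h) is ~(16M - 1) == 0xffffffffff000000UL, so any addr
 * or len with one of the low 24 bits set (e.g. len == 16M + 512K)
 * fails the "& ~huge_page_mask(h)" test and the request is rejected
 * with -EINVAL.
 */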

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
        return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
        return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
        pte_t pte;

        pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
        flush_tlb_page(vma, addr);
}
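
/*
 * Reading note (interpretation, not original commentary): pte_update()
 * with a ~0UL clear mask atomically zeroes the PTE and returns its old
 * value, so huge_ptep_get_and_clear() hands the previous PTE back to
 * the caller, and huge_ptep_clear_flush() pairs that with a TLB flush
 * so no stale huge-page translation survives the teardown.
 */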

static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
        /*
         * The "return 1" forces a call of update_mmu_cache, which will write
         * a TLB entry.  Without this, platforms that don't do a write of the
         * TLB entry in the TLB miss handler asm will fault ad infinitum.
         */
        ptep_set_access_flags(vma, addr, ptep, pte, dirty);
        return 1;
#else
        return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}
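
/*
 * Caller-side sketch (illustrative; the exact fault-path shape is an
 * assumption, not part of this header): generic code acts on the
 * return value roughly as
 *
 *      if (huge_ptep_set_access_flags(vma, addr, ptep, pte, dirty))
 *              update_mmu_cache(vma, addr, ptep);
 *
 * which is why the HUGETLB_NEED_PRELOAD variant unconditionally
 * returns 1: update_mmu_cache() then preloads the TLB entry that the
 * miss handler would otherwise never write.
 */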

static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
        return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
                                    unsigned pdshift)
{
        return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts, as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif
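
/*
 * Usage sketch (illustrative; the call site is an assumption, not
 * taken from this header): FSL Book3E platform setup is expected to
 * call
 *
 *      reserve_hugetlb_gpages();
 *
 * early in boot, while memblock still owns the memory map; on every
 * other configuration the empty stub above makes the call a no-op.
 */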

#endif /* _ASM_POWERPC_HUGETLB_H */