linux/arch/powerpc/include/asm/hugetlb.h
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

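/*
 * Slab cache from which the hugepte tables referenced by hugepd
 * entries are allocated (see arch/powerpc/mm/hugetlbpage.c).
 */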
extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
        /*
         * We have only four bits to encode the MMU page size.
         */
        BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
        return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
}

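/*
 * Only the MMU page-size index lives in the low bits of a book3s-64
 * hugepd: four bits of index (enforced by the BUILD_BUG_ON above),
 * stored shifted left by two.
 */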
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
        return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
        return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
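/*
 * Illustrative example: a hugepd of 16M pages stores the index
 * MMU_PAGE_16M, which mmu_psize_to_shift() maps back to a shift of 24
 * (1UL << 24 == 16M).
 */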

#else

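/*
 * On the other subarchs the page-table pointer appears to be stored
 * with its PD_HUGE bit cleared (distinguishing a hugepd from a normal
 * entry), so it is OR'ed back in here; the low HUGEPD_SHIFT_MASK bits
 * hold the page shift directly.
 */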
static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
        return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
        return hpd.pd & HUGEPD_SHIFT_MASK;
}

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
                                    unsigned pdshift)
{
        /*
         * On FSL BookE, we have multiple higher-level table entries that
         * point to the same hugepte.  Just use the first one since they're all
         * identical.  So for that case, idx=0.
         */
        unsigned long idx = 0;

        pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
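        /*
         * The directory spans 2^pdshift bytes and each hugepte maps
         * 2^hugepd_shift(hpd) bytes, so the address bits in between
         * select the entry.
         */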
        idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

        return dir + idx;
}

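/*
 * Walk the page tables for @addr, returning the huge PTE and storing
 * the page-size shift of the mapping in *@shift.
 */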
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
                                 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

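/*
 * With CONFIG_PPC_MM_SLICES the address space is divided into slices
 * that each allow only one page size, so a real range check is needed;
 * other configurations can place normal pages anywhere.
 */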
#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len)
{
        return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
                            pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                            unsigned long end, unsigned long floor,
                            unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage-size
 * aligned regions are OK without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        struct hstate *h = hstate_file(file);
        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        return 0;
}
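/*
 * Illustrative check: with a 16M hstate, huge_page_mask(h) is
 * ~(16M - 1), so an addr or len with any bit set below 16M (i.e. not
 * 16M-aligned) fails with -EINVAL.
 */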

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);
}

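/*
 * Atomically exchange the huge PTE with zero and return the old value:
 * pte_update() is called with a clear mask of ~0UL and a set mask of 0;
 * the trailing 1 in the ppc64 variant flags a huge-page update.
 */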
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
        return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
        return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
        huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
        flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
        /*
         * The "return 1" forces a call of update_mmu_cache, which will write a
         * TLB entry.  Without this, platforms that don't do a write of the TLB
         * entry in the TLB miss handler asm will fault ad infinitum.
         */
        ptep_set_access_flags(vma, addr, ptep, pte, dirty);
        return 1;
#else
        return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}

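/*
 * powerpc keeps no architecture-private state in a hugepage's page
 * flags, so there is nothing to clear here.
 */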
static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
                                    unsigned pdshift)
{
        return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */