linux/arch/powerpc/include/asm/hugetlb.h
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

extern struct kmem_cache *hugepte_cache;

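/*
 * Reading aid, not a definition from this header: a hugepd_t packs a
 * pointer to a directory of huge PTEs together with the huge page
 * shift.  hugepd_page() below recovers the kernel pointer by masking
 * off the shift bits and or-ing PD_HUGE back in; hugepd_shift() reads
 * the shift out of the low bits.  For example, a hugepd for 16 MB pages
 * would carry the directory address in its upper bits and the value 24
 * in the bits selected by HUGEPD_SHIFT_MASK.
 */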
static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
        return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
        return hpd.pd & HUGEPD_SHIFT_MASK;
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
                                    unsigned pdshift)
{
        /*
         * On FSL BookE, we have multiple higher-level table entries that
         * point to the same hugepte.  Just use the first one since they're all
         * identical.  So for that case, idx=0.
         */
        unsigned long idx = 0;

        pte_t *dir = hugepd_page(*hpdp);
#ifndef CONFIG_PPC_FSL_BOOK3E
        idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

        return dir + idx;
}
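
/*
 * Worked example, illustrative only: with 16 MB huge pages (shift 24)
 * hanging off a directory slot that covers 1 GB (pdshift 30), the table
 * returned by hugepd_page() holds 1 << (30 - 24) = 64 huge PTEs, and
 *
 *      idx = (addr & ((1UL << 30) - 1)) >> 24
 *
 * picks the one covering 'addr'.  On FSL BookE the idx = 0 shortcut in
 * the function above applies instead, as its comment explains.
 */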

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
                                 unsigned long addr, unsigned *shift);
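/*
 * A hypothetical caller of the helper above, shown only to illustrate
 * the interface: the returned pointer addresses the (huge) PTE mapping
 * 'addr' and *shift receives the page shift backing it.
 *
 *      unsigned shift;
 *      pte_t *ptep = huge_pte_offset_and_shift(mm, addr, &shift);
 *
 *      if (ptep && !pte_none(*ptep))
 *              psize = 1UL << shift;
 */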

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len)
{
        return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
                            pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                            unsigned long end, unsigned long floor,
                            unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        struct hstate *h = hstate_file(file);
        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        return 0;
}
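
/*
 * Concretely (example values, not taken from this file): with a 16 MB
 * huge page size, huge_page_mask(h) is ~(16MB - 1), so the two checks
 * above reduce to "addr and len must both be 16 MB multiples"; anything
 * else is rejected with -EINVAL before a mapping is attempted.
 */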

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}


static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
        return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
        return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
        pte_t pte;
        pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
        flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
        /*
         * The "return 1" forces a call of update_mmu_cache, which will write a
         * TLB entry.  Without this, platforms that don't do a write of the TLB
         * entry in the TLB miss handler asm will fault ad infinitum.
         */
        ptep_set_access_flags(vma, addr, ptep, pte, dirty);
        return 1;
#else
        return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}
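
/*
 * In practice (a connection this header only hints at), that forced
 * update_mmu_cache() call is where a HUGETLB_NEED_PRELOAD platform gets
 * to seed the TLB, e.g. via book3e_hugetlb_preload() declared above;
 * hence the early "return 1" rather than the plain return value.
 */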

static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
        return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
{
}
#endif /* CONFIG_HUGETLB_PAGE */


/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif
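
/*
 * Illustrative call site, not lifted from real code: an FSL Book3E
 * platform is expected to invoke this once, early in boot while
 * memblock is still the active allocator:
 *
 *      reserve_hugetlb_gpages();
 *
 * All other configurations get the empty stub above, so callers do not
 * need to wrap the call in #ifdefs.
 */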

#endif /* _ASM_POWERPC_HUGETLB_H */