linux/include/linux/huge_mm.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                         struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
                                          pmd_t *pmd,
                                          unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                         unsigned long new_addr, unsigned long old_end,
                         pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
                        int prot_numa);
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE  ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE  ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK  (~(HPAGE_PUD_SIZE - 1))
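/*
 * Worked example (editorial note, architecture-dependent): on x86-64
 * with 4KiB base pages, PMD_SHIFT is 21 and PUD_SHIFT is 30, so
 * HPAGE_PMD_SIZE is 2MiB (HPAGE_PMD_ORDER = 9, HPAGE_PMD_NR = 512) and
 * HPAGE_PUD_SIZE is 1GiB. Other architectures or page sizes yield
 * different values.
 */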

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on vmas which are known to support THP.
 * Use transparent_hugepage_enabled otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_NOHUGEPAGE)
                return false;

        if (is_vma_temporary_stack(vma))
                return false;

        if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                return false;

        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
                return true;
        /*
         * For dax vmas, try to always use hugepage mappings. If the kernel
         * does not support hugepages, fsdax mappings will fall back to
         * PAGE_SIZE mappings, and device-dax namespaces, which try to
         * guarantee a given mapping size, will fail to enable.
         */
        if (vma_is_dax(vma))
                return true;

        if (transparent_hugepage_flags &
                                (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
                return !!(vma->vm_flags & VM_HUGEPAGE);

        return false;
}
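/*
 * Illustrative sketch (editorial, not part of this header): a fault
 * path that has already vetted the vma with the full
 * transparent_hugepage_enabled() check can use the lighter helper
 * before attempting a PMD-sized fault:
 *
 *      if (__transparent_hugepage_enabled(vma) &&
 *          transhuge_vma_suitable(vma, haddr))
 *              ret = create_huge_pmd(vmf);     // hypothetical helper
 */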

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long haddr)
{
        /* Don't have to check pgoff for anonymous vma */
        if (!vma_is_anonymous(vma)) {
                if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
                        (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
                        return false;
        }

        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                return false;
        return true;
}
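/*
 * Editorial note: for file-backed vmas the check above demands that
 * vma->vm_start and vma->vm_pgoff are congruent modulo HPAGE_PMD_NR
 * pages, so that a PMD-aligned virtual address maps a PMD-aligned
 * range of the file. haddr itself is expected to be a
 * HPAGE_PMD_SIZE-aligned address that, together with the
 * HPAGE_PMD_SIZE bytes following it, lies entirely inside the vma.
 */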

#define transparent_hugepage_use_zero_page()                            \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()                                \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
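/*
 * Illustrative sketch (editorial assumption: the caller holds a
 * reference and the page lock, as split_huge_page_to_list() expects;
 * 0 means the THP was split into base pages):
 *
 *      get_page(page);
 *      lock_page(page);
 *      if (!split_huge_page(page))
 *              ;       // 'page' is now an order-0 page
 *      unlock_page(page);
 *      put_page(page);
 */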
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)   \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                                false, NULL);           \
        } while (0)
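/*
 * Editorial note: the macro above tests the pmd before calling
 * __split_huge_pmd(), so it is safe to invoke on any pmd; the split
 * only runs for huge, devmap or swap (migration) entries. A typical
 * call before touching a range at base-page granularity:
 *
 *      split_huge_pmd(vma, pmd, addr);
 *      // if *pmd was huge, it now points to a regular page table
 */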

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address);

#define split_huge_pud(__vma, __pud, __address)                         \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
                if (pud_trans_huge(*____pud)                            \
                                        || pud_devmap(*____pud))        \
                        __split_huge_pud(__vma, __pud, __address);      \
        } while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                    unsigned long start,
                                    unsigned long end,
                                    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
        return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
}
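/*
 * Illustrative sketch (editorial, not part of this header): the usual
 * calling pattern, with mmap_sem already held by the caller:
 *
 *      ptl = pmd_trans_huge_lock(pmd, vma);
 *      if (ptl) {
 *              // *pmd is huge and stable; operate on it here
 *              spin_unlock(ptl);
 *      } else {
 *              // not (or no longer) a huge pmd; fall back to ptes
 *      }
 */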
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pud_trans_huge(*pud) || pud_devmap(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
        if (unlikely(PageTransHuge(page)))
                return HPAGE_PMD_NR;
        return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t *pud, int flags, struct dev_pagemap **pgmap);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
        return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
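/*
 * Illustrative sketch (editorial): the huge zero page is a single
 * global page shared by all mms. mm_get_huge_zero_page() pins it on
 * behalf of an mm (allocating it on first use) and
 * mm_put_huge_zero_page() drops that pin on mm teardown:
 *
 *      zero_page = mm_get_huge_zero_page(vma->vm_mm);
 *      if (!zero_page)
 *              ;       // allocation failed; fall back to normal pages
 */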

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
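/*
 * Illustrative sketch (editorial; maybe_pmd_mkwrite() is a helper in
 * mm/huge_memory.c, named here only for illustration): building the
 * pmd entry for a freshly allocated THP in a write fault:
 *
 *      entry = mk_huge_pmd(page, vma->vm_page_prot);
 *      entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 */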

static inline bool thp_migration_supported(void)
{
        return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
        /*
         * The deferred (split) list is kept on the second tail page:
         * the corresponding space on the first tail page is occupied
         * by compound_head and the other compound-page metadata.
         */
        return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long haddr)
{
        return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area   NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
        BUILD_BUG();
        return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
        return 0;
}
static inline int split_huge_page(struct page *page)
{
        return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pud, __address) \
        do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
        return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
                pmd_t orig_pmd)
{
        return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
        return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
        return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
        unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
        return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
        unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
        return NULL;
}

static inline bool thp_migration_supported(void)
{
        return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */