linux/include/linux/khugepaged.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;

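/* Set-up, tear-down and control entry points implemented in mm/khugepaged.c. */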
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
				      unsigned long vm_flags);
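
/*
 * collapse_pte_mapped_thp() tries to re-establish a PMD mapping for a
 * huge page that is currently mapped by PTEs.  It is only needed for
 * file/shmem THP, hence the no-op stub when CONFIG_SHMEM is not set.
 */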
#ifdef CONFIG_SHMEM
extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
#else
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
#endif

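/*
 * Helpers that test the THP mode bits in transparent_hugepage_flags,
 * as configured through the transparent_hugepage sysfs interface.
 */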
#define khugepaged_enabled()					\
	(transparent_hugepage_flags &				\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

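/*
 * Register the child mm with khugepaged at fork time if the parent mm
 * was already under khugepaged's control (MMF_VM_HUGEPAGE set).
 */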
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
		return __khugepaged_enter(mm);
	return 0;
}

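/* Unhook the mm from khugepaged when it goes away, if it was registered. */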
static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
		__khugepaged_exit(mm);
}

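/*
 * Register the mm the first time one of its VMAs becomes eligible for
 * collapse: THP "always" mode, or MADV_HUGEPAGE with THP "madvise" mode,
 * provided neither VM_NOHUGEPAGE is set on the VMA nor MMF_DISABLE_THP
 * on the mm.  Returns -ENOMEM if __khugepaged_enter() fails.
 */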
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
		if ((khugepaged_always() ||
		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
		    !(vm_flags & VM_NOHUGEPAGE) &&
		    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
			if (__khugepaged_enter(vma->vm_mm))
				return -ENOMEM;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
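/* THP compiled out: provide no-op stubs so callers need no #ifdefs. */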
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	return 0;
}
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					     unsigned long vm_flags)
{
	return 0;
}
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */