linux/include/linux/ksm.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
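
/*
 * Illustrative userspace sketch (not part of this header): a process opts
 * memory into KSM with madvise(MADV_MERGEABLE); ksmd may then merge
 * identical pages within the advised range.  The length used below is an
 * arbitrary example value.
 *
 *	#include <sys/mman.h>
 *
 *	size_t len = 64UL << 20;	// example: 64 MiB
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED && madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise(MADV_MERGEABLE)");
 */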

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
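/*
 * ksm_madvise() implements the MADV_MERGEABLE and MADV_UNMERGEABLE hints;
 * __ksm_enter() and __ksm_exit() add an mm to and remove it from ksmd's
 * scan list, and are wrapped by ksm_fork()/ksm_exit() below.
 */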
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

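/*
 * Called when duplicating an mm at fork(): if the parent mm was registered
 * with KSM, register the child mm too so its mergeable areas keep being
 * scanned.
 */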
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

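/*
 * Called when an mm is torn down: if it was registered with KSM, let
 * __ksm_exit() clean up its KSM state and drop it from the scan list.
 */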
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

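/*
 * A KSM page keeps a pointer to its stable tree node in page->mapping,
 * tagged with PAGE_MAPPING_KSM in place of an anon_vma pointer; the two
 * helpers below read and write that field.
 */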
static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
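/*
 * Caller sketch, loosely modelled on the swap-in path (exact error handling
 * varies by kernel version): the return value is the original page, a newly
 * allocated private copy, or NULL if the copy could not be allocated.
 *
 *	page = ksm_might_need_to_copy(page, vma, address);
 *	if (unlikely(!page)) {
 *		ret = VM_FAULT_OOM;	// copy allocation failed
 *		goto out;
 *	}
 */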
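/*
 * rmap_walk_ksm() visits every mapping of a KSM page by walking the
 * rmap_items hanging off its stable node; ksm_migrate_page() updates the
 * stable node when a KSM page is migrated to a new physical page.
 */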
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else  /* !CONFIG_KSM */

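/*
 * CONFIG_KSM is disabled: provide no-op stubs so the core mm call sites
 * compile without #ifdefs.
 */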
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline void rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */