/* linux/include/linux/mm_inline.h */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef LINUX_MM_INLINE_H
   3#define LINUX_MM_INLINE_H
   4
   5#include <linux/huge_mm.h>
   6#include <linux/swap.h>
   7
   8/**
   9 * page_is_file_lru - should the page be on a file LRU or anon LRU?
  10 * @page: the page to test
  11 *
  12 * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
  13 * freed anonymous page (e.g. via MADV_FREE).  Returns 0 if @page is a normal
  14 * anonymous page, a tmpfs page or otherwise ram or swap backed page.  Used by
  15 * functions that manipulate the LRU lists, to sort a page onto the right LRU
  16 * list.
  17 *
  18 * We would like to get this info without a page flag, but the state
  19 * needs to survive until the page is last deleted from the LRU, which
  20 * could be as far down as __page_cache_release.
  21 */
  22static inline int page_is_file_lru(struct page *page)
  23{
  24        return !PageSwapBacked(page);
  25}
  26
  27static __always_inline void update_lru_size(struct lruvec *lruvec,
  28                                enum lru_list lru, enum zone_type zid,
  29                                int nr_pages)
  30{
  31        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
  32
  33        __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
  34        __mod_zone_page_state(&pgdat->node_zones[zid],
  35                                NR_ZONE_LRU_BASE + lru, nr_pages);
  36#ifdef CONFIG_MEMCG
  37        mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
  38#endif
  39}
  40
  41/**
  42 * __clear_page_lru_flags - clear page lru flags before releasing a page
  43 * @page: the page that was on lru and now has a zero reference
  44 */
  45static __always_inline void __clear_page_lru_flags(struct page *page)
  46{
  47        VM_BUG_ON_PAGE(!PageLRU(page), page);
  48
  49        __ClearPageLRU(page);
  50
  51        /* this shouldn't happen, so leave the flags to bad_page() */
  52        if (PageActive(page) && PageUnevictable(page))
  53                return;
  54
  55        __ClearPageActive(page);
  56        __ClearPageUnevictable(page);
  57}
  58
  59/**
  60 * page_lru - which LRU list should a page be on?
  61 * @page: the page to test
  62 *
  63 * Returns the LRU list a page should be on, as an index
  64 * into the array of LRU lists.
  65 */
  66static __always_inline enum lru_list page_lru(struct page *page)
  67{
  68        enum lru_list lru;
  69
  70        VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
  71
  72        if (PageUnevictable(page))
  73                return LRU_UNEVICTABLE;
  74
  75        lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
  76        if (PageActive(page))
  77                lru += LRU_ACTIVE;
  78
  79        return lru;
  80}
  81
  82static __always_inline void add_page_to_lru_list(struct page *page,
  83                                struct lruvec *lruvec)
  84{
  85        enum lru_list lru = page_lru(page);
  86
  87        update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
  88        list_add(&page->lru, &lruvec->lists[lru]);
  89}
  90
  91static __always_inline void add_page_to_lru_list_tail(struct page *page,
  92                                struct lruvec *lruvec)
  93{
  94        enum lru_list lru = page_lru(page);
  95
  96        update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
  97        list_add_tail(&page->lru, &lruvec->lists[lru]);
  98}
  99
 100static __always_inline void del_page_from_lru_list(struct page *page,
 101                                struct lruvec *lruvec)
 102{
 103        list_del(&page->lru);
 104        update_lru_size(lruvec, page_lru(page), page_zonenum(page),
 105                        -thp_nr_pages(page));
 106}
 107#endif
 108