/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

/*
 * !CONFIG_DEBUG_PAGE_REF stubs: page_ref_tracepoint_active() above is the
 * constant "false" in this configuration, so these empty hooks are never
 * reached and the compiler elides the tracing branches in the helpers
 * below entirely.
 */
static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif /* CONFIG_DEBUG_PAGE_REF */

  65static inline int page_ref_count(const struct page *page)
  66{
  67        return atomic_read(&page->_refcount);
  68}
  69
  70static inline int page_count(const struct page *page)
  71{
  72        return atomic_read(&compound_head(page)->_refcount);
  73}
  74
/*
 * Unconditionally set the reference count of @page to @v, firing the
 * page_ref_set tracepoint when tracing is enabled.
 */
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug): the page starts life with a
 * single reference.
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

/* Add @nr references to @page; the modification is traced as +nr. */
static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, nr);
}

/* Drop @nr references from @page; the modification is traced as -nr. */
static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -nr);
}

 105static inline int page_ref_sub_return(struct page *page, int nr)
 106{
 107        int ret = atomic_sub_return(nr, &page->_refcount);
 108
 109        if (page_ref_tracepoint_active(page_ref_mod_and_return))
 110                __page_ref_mod_and_return(page, -nr, ret);
 111        return ret;
 112}
 113
/* Take one extra reference on @page; traced as a +1 modification. */
static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, 1);
}

/* Drop one reference on @page; traced as a -1 modification. */
static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -1);
}

 128static inline int page_ref_sub_and_test(struct page *page, int nr)
 129{
 130        int ret = atomic_sub_and_test(nr, &page->_refcount);
 131
 132        if (page_ref_tracepoint_active(page_ref_mod_and_test))
 133                __page_ref_mod_and_test(page, -nr, ret);
 134        return ret;
 135}
 136
 137static inline int page_ref_inc_return(struct page *page)
 138{
 139        int ret = atomic_inc_return(&page->_refcount);
 140
 141        if (page_ref_tracepoint_active(page_ref_mod_and_return))
 142                __page_ref_mod_and_return(page, 1, ret);
 143        return ret;
 144}
 145
 146static inline int page_ref_dec_and_test(struct page *page)
 147{
 148        int ret = atomic_dec_and_test(&page->_refcount);
 149
 150        if (page_ref_tracepoint_active(page_ref_mod_and_test))
 151                __page_ref_mod_and_test(page, -1, ret);
 152        return ret;
 153}
 154
 155static inline int page_ref_dec_return(struct page *page)
 156{
 157        int ret = atomic_dec_return(&page->_refcount);
 158
 159        if (page_ref_tracepoint_active(page_ref_mod_and_return))
 160                __page_ref_mod_and_return(page, -1, ret);
 161        return ret;
 162}
 163
 164static inline int page_ref_add_unless(struct page *page, int nr, int u)
 165{
 166        int ret = atomic_add_unless(&page->_refcount, nr, u);
 167
 168        if (page_ref_tracepoint_active(page_ref_mod_unless))
 169                __page_ref_mod_unless(page, nr, ret);
 170        return ret;
 171}
 172
/*
 * Freeze @page: atomically replace an expected refcount of @count with 0
 * so that no further references can be taken.  Returns 1 on success, 0 if
 * the count was not @count (somebody else holds or raced for a reference).
 */
static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

/*
 * Undo page_ref_freeze(): restore the refcount of a frozen page to @count.
 * The page must still be frozen (count == 0) and @count must be non-zero.
 * atomic_set_release() ensures all modifications made while the page was
 * frozen are visible before the new count is.
 */
static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set_release(&page->_refcount, count);
	if (page_ref_tracepoint_active(page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

#endif /* _LINUX_PAGE_REF_H */