/* linux/include/linux/page_ref.h */
   1#ifndef _LINUX_PAGE_REF_H
   2#define _LINUX_PAGE_REF_H
   3
   4#include <linux/atomic.h>
   5#include <linux/mm_types.h>
   6#include <linux/page-flags.h>
   7#include <linux/tracepoint-defs.h>
   8
   9extern struct tracepoint __tracepoint_page_ref_set;
  10extern struct tracepoint __tracepoint_page_ref_mod;
  11extern struct tracepoint __tracepoint_page_ref_mod_and_test;
  12extern struct tracepoint __tracepoint_page_ref_mod_and_return;
  13extern struct tracepoint __tracepoint_page_ref_mod_unless;
  14extern struct tracepoint __tracepoint_page_ref_freeze;
  15extern struct tracepoint __tracepoint_page_ref_unfreeze;
  16
  17#ifdef CONFIG_DEBUG_PAGE_REF
  18
  19/*
  20 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
  21 * functions. But due to include header file issues, that is not
  22 * feasible. Instead we have to open code the static key functions.
  23 *
  24 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
  25 */
  26#define page_ref_tracepoint_active(t) static_key_false(&(t).key)
  27
  28extern void __page_ref_set(struct page *page, int v);
  29extern void __page_ref_mod(struct page *page, int v);
  30extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
  31extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
  32extern void __page_ref_mod_unless(struct page *page, int v, int u);
  33extern void __page_ref_freeze(struct page *page, int v, int ret);
  34extern void __page_ref_unfreeze(struct page *page, int v);
  35
  36#else
  37
  38#define page_ref_tracepoint_active(t) false
  39
  40static inline void __page_ref_set(struct page *page, int v)
  41{
  42}
  43static inline void __page_ref_mod(struct page *page, int v)
  44{
  45}
  46static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
  47{
  48}
  49static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
  50{
  51}
  52static inline void __page_ref_mod_unless(struct page *page, int v, int u)
  53{
  54}
  55static inline void __page_ref_freeze(struct page *page, int v, int ret)
  56{
  57}
  58static inline void __page_ref_unfreeze(struct page *page, int v)
  59{
  60}
  61
  62#endif
  63
  64static inline int page_ref_count(struct page *page)
  65{
  66        return atomic_read(&page->_count);
  67}
  68
  69static inline int page_count(struct page *page)
  70{
  71        return atomic_read(&compound_head(page)->_count);
  72}
  73
  74static inline void set_page_count(struct page *page, int v)
  75{
  76        atomic_set(&page->_count, v);
  77        if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
  78                __page_ref_set(page, v);
  79}
  80
  81/*
  82 * Setup the page count before being freed into the page allocator for
  83 * the first time (boot or memory hotplug)
  84 */
  85static inline void init_page_count(struct page *page)
  86{
  87        set_page_count(page, 1);
  88}
  89
  90static inline void page_ref_add(struct page *page, int nr)
  91{
  92        atomic_add(nr, &page->_count);
  93        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
  94                __page_ref_mod(page, nr);
  95}
  96
  97static inline void page_ref_sub(struct page *page, int nr)
  98{
  99        atomic_sub(nr, &page->_count);
 100        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
 101                __page_ref_mod(page, -nr);
 102}
 103
 104static inline void page_ref_inc(struct page *page)
 105{
 106        atomic_inc(&page->_count);
 107        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
 108                __page_ref_mod(page, 1);
 109}
 110
 111static inline void page_ref_dec(struct page *page)
 112{
 113        atomic_dec(&page->_count);
 114        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
 115                __page_ref_mod(page, -1);
 116}
 117
 118static inline int page_ref_sub_and_test(struct page *page, int nr)
 119{
 120        int ret = atomic_sub_and_test(nr, &page->_count);
 121
 122        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
 123                __page_ref_mod_and_test(page, -nr, ret);
 124        return ret;
 125}
 126
 127static inline int page_ref_dec_and_test(struct page *page)
 128{
 129        int ret = atomic_dec_and_test(&page->_count);
 130
 131        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
 132                __page_ref_mod_and_test(page, -1, ret);
 133        return ret;
 134}
 135
 136static inline int page_ref_dec_return(struct page *page)
 137{
 138        int ret = atomic_dec_return(&page->_count);
 139
 140        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
 141                __page_ref_mod_and_return(page, -1, ret);
 142        return ret;
 143}
 144
 145static inline int page_ref_add_unless(struct page *page, int nr, int u)
 146{
 147        int ret = atomic_add_unless(&page->_count, nr, u);
 148
 149        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
 150                __page_ref_mod_unless(page, nr, ret);
 151        return ret;
 152}
 153
 154static inline int page_ref_freeze(struct page *page, int count)
 155{
 156        int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count);
 157
 158        if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
 159                __page_ref_freeze(page, count, ret);
 160        return ret;
 161}
 162
 163static inline void page_ref_unfreeze(struct page *page, int count)
 164{
 165        VM_BUG_ON_PAGE(page_count(page) != 0, page);
 166        VM_BUG_ON(count == 0);
 167
 168        atomic_set(&page->_count, count);
 169        if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
 170                __page_ref_unfreeze(page, count);
 171}
 172
 173#endif
 174