/* linux/include/linux/percpu_counter.h */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_PERCPU_COUNTER_H
   3#define _LINUX_PERCPU_COUNTER_H
   4/*
   5 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
   6 *
   7 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
   8 */
   9
  10#include <linux/spinlock.h>
  11#include <linux/smp.h>
  12#include <linux/list.h>
  13#include <linux/threads.h>
  14#include <linux/percpu.h>
  15#include <linux/types.h>
  16#include <linux/gfp.h>
  17
  18#ifdef CONFIG_SMP
  19
/*
 * SMP variant: @count holds the approximate aggregate, while each CPU
 * accumulates small deltas in its @counters slot (folded into @count in
 * batches — see percpu_counter_add_batch() in lib/percpu_counter.c).
 */
struct percpu_counter {
	raw_spinlock_t lock;	/* presumably serializes writes to @count — see lib/percpu_counter.c */
	s64 count;		/* approximate aggregate value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-cpu deltas not yet folded into @count */
};
  28
/* Default batch size used by the non-batch wrappers below. */
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

/*
 * Initialize @fbc to @value.  A distinct static lock_class_key is emitted
 * per call site, so lockdep can distinguish different counters' locks.
 * Returns the result of __percpu_counter_init() (0 on success; on SMP the
 * per-cpu allocation can presumably fail — see lib/percpu_counter.c).
 */
#define percpu_counter_init(fbc, value, gfp)                            \
        ({                                                              \
                static struct lock_class_key __key;                     \
                                                                        \
                __percpu_counter_init(fbc, value, gfp, &__key);         \
        })

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
  47
/*
 * Compare the counter with @rhs using the default batch size; returns the
 * result of __percpu_counter_compare() (implemented in lib/percpu_counter.c).
 */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
  52
/* Add @amount to the counter, using the default batch size. */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
  57
  58static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
  59{
  60        s64 ret = __percpu_counter_sum(fbc);
  61        return ret < 0 ? 0 : ret;
  62}
  63
/* Sum via __percpu_counter_sum(); may be negative (no clamping). */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
  68
/*
 * Cheap read: returns @count only, without the per-cpu deltas, so the
 * result may lag the true total.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
  73
/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative: @count is read
 * without taking @lock, so it can race with concurrent updates.  This
 * variant clamps such transients to zero.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();              /* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}
  88
  89static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
  90{
  91        return (fbc->counters != NULL);
  92}
  93
  94#else /* !CONFIG_SMP */
  95
/* UP (!CONFIG_SMP) variant: a single exact s64, no per-cpu machinery. */
struct percpu_counter {
	s64 count;
};
  99
/*
 * UP init: nothing to allocate, just seed the value.  @gfp is accepted
 * only for API parity with the SMP version; always returns 0.
 */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}
 106
/* UP destroy: nothing was allocated, so nothing to release. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
 110
/* Overwrite the counter with @amount. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
 115
 116static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
 117{
 118        if (fbc->count > rhs)
 119                return 1;
 120        else if (fbc->count < rhs)
 121                return -1;
 122        else
 123                return 0;
 124}
 125
/* UP: the count is exact, so @batch has no effect. */
static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}
 131
/*
 * UP add: disable preemption so the non-atomic read-modify-write of
 * @count cannot be preempted mid-update.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
 139
/* UP: there are no per-cpu deltas, so @batch is ignored. */
static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
 145
/* UP read: the single s64 is the exact value. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
 150
/*
 * percpu_counter is intended to track positive numbers.  In the UP case
 * the number should never be negative, so no clamping is done here.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
 159
/* UP: "summing" is the same cheap read as percpu_counter_read_positive(). */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
 164
/* UP: "summing" is the same cheap read as percpu_counter_read(). */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
 169
/* UP counters need no allocation, so they are always "initialized". */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}
 174
 175#endif  /* CONFIG_SMP */
 176
/* Increment the counter by one (both SMP and UP configs). */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
 181
/* Decrement the counter by one (both SMP and UP configs). */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
 186
/* Subtract @amount by adding its negation (both SMP and UP configs). */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
 191
 192#endif /* _LINUX_PERCPU_COUNTER_H */
 193