linux/include/linux/percpu_counter.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
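/*
 * A minimal usage sketch, for illustration only; the "nr_widgets" name is
 * hypothetical and not part of this API:
 *
 *	static struct percpu_counter nr_widgets;
 *
 *	if (percpu_counter_init(&nr_widgets, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_widgets);	  (fast, approximate update)
 *	total = percpu_counter_sum(&nr_widgets);  (slow but precise readout)
 *	percpu_counter_destroy(&nr_widgets);
 */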

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
        raw_spinlock_t lock;
        s64 count;
#ifdef CONFIG_HOTPLUG_CPU
        struct list_head list;  /* All percpu_counters are on a list */
#endif
        s32 __percpu *counters;
};

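/*
 * Default fold threshold: each CPU accumulates up to roughly this many
 * counts locally before folding them into fbc->count under the lock.
 * A larger batch makes updates cheaper but lets percpu_counter_read()
 * drift further from the true value.  (The value is scaled with the
 * number of online CPUs by lib/percpu_counter.c.)
 */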
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                          struct lock_class_key *key);

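/*
 * The wrapper below gives each percpu_counter_init() call site its own
 * static lockdep class key, so lockdep treats the spinlocks of unrelated
 * counters as distinct classes instead of reporting false dependencies
 * between them.
 */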
#define percpu_counter_init(fbc, value, gfp)                            \
        ({                                                              \
                static struct lock_class_key __key;                     \
                                                                        \
                __percpu_counter_init(fbc, value, gfp, &__key);         \
        })

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
                              s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

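/*
 * Sketch of the compare semantics (the decision logic itself lives in
 * __percpu_counter_compare() in lib/percpu_counter.c): when fbc->count is
 * further from rhs than the worst-case per-cpu drift, roughly
 * batch * num_online_cpus(), the cheap central read is trusted; otherwise
 * the counter is summed precisely before comparing.
 */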
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        s64 ret = __percpu_counter_sum(fbc);
        return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc);
}

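/*
 * Fast but approximate: returns the central count without summing the
 * per-cpu deltas, so the result may lag the true value by up to roughly
 * batch counts per CPU.  Use percpu_counter_sum() when accuracy matters.
 */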
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter that should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        /* Prevent reloads of fbc->count */
        s64 ret = READ_ONCE(fbc->count);

        if (ret >= 0)
                return ret;
        return 0;
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
        return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

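/*
 * UP fallback: with a single CPU there is nothing to distribute, so the
 * counter degenerates to a plain s64 and updates only need preemption
 * disabled; reads and sums are exact and trivially cheap.
 */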
struct percpu_counter {
        s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                                      gfp_t gfp)
{
        fbc->count = amount;
        return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        if (fbc->count > rhs)
                return 1;
        else if (fbc->count < rhs)
                return -1;
        else
                return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
        return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        preempt_disable();
        fbc->count += amount;
        preempt_enable();
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
        return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
#endif  /* CONFIG_SMP */

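/*
 * Convenience wrappers shared by the SMP and UP implementations above.
 */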
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */