linux/include/linux/percpu_counter.h
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
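
/*
 * Where the 4 kbytes above come from (a back-of-the-envelope sketch,
 * assuming each per-CPU slot is padded out to a full cacheline to avoid
 * false sharing): a P4-era cacheline is 128 bytes, so 32 CPUs * 128 bytes
 * = 4096 bytes for the per-CPU deltas alone, i.e. 4 bytes of s32 payload
 * per 128-byte slot.
 */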

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
        raw_spinlock_t lock;
        s64 count;
#ifdef CONFIG_HOTPLUG_CPU
        struct list_head list;  /* All percpu_counters are on a list */
#endif
        s32 __percpu *counters;
};
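
/*
 * Informal invariant (the fold logic lives in lib/percpu_counter.c, not in
 * this header): the exact value of the counter is fbc->count plus the sum
 * of *per_cpu_ptr(fbc->counters, cpu) over all CPUs.  fbc->count alone is
 * only an approximation, since recent updates may still be parked in the
 * per-CPU s32 deltas.
 */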

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                          struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)                            \
        ({                                                              \
                static struct lock_class_key __key;                     \
                                                                        \
                __percpu_counter_init(fbc, value, gfp, &__key);         \
        })
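
/*
 * Typical lifecycle, as a sketch only ("nr_widgets" is an illustrative
 * name, and error handling beyond the init failure is elided).  Updates
 * are cheap and usually CPU-local; percpu_counter_sum() is exact but has
 * to walk every CPU's delta.
 *
 *      struct percpu_counter nr_widgets;
 *      s64 total;
 *
 *      if (percpu_counter_init(&nr_widgets, 0, GFP_KERNEL))
 *              return -ENOMEM;
 *      percpu_counter_inc(&nr_widgets);
 *      percpu_counter_add(&nr_widgets, 16);
 *      total = percpu_counter_sum(&nr_widgets);
 *      percpu_counter_destroy(&nr_widgets);
 */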

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        __percpu_counter_add(fbc, amount, percpu_counter_batch);
}
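
/*
 * How the batch bound plays out (this describes the out-of-line
 * implementation in lib/percpu_counter.c, which is not visible here):
 * each CPU accumulates updates in its local s32 delta and only takes
 * fbc->lock to fold the delta into fbc->count once the delta's magnitude
 * reaches the batch.  percpu_counter_read() can therefore be off by
 * roughly batch * num_online_cpus() in the worst case; with a batch of
 * 32 on a 4-CPU box, for example, the snapshot may stray by about 128
 * from the exact sum.
 */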

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        s64 ret = __percpu_counter_sum(fbc);
        return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative: updates may still
 * be parked in the per-CPU deltas, so fbc->count alone can transiently
 * undershoot the exact sum.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        s64 ret = fbc->count;

        barrier();              /* Prevent reloads of fbc->count */
        if (ret >= 0)
                return ret;
        return 0;
}
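
/*
 * One concrete interleaving for the negative-read case above: the exact
 * value is 0, but a run of decrements from one CPU has just crossed the
 * batch and been folded into fbc->count, while the matching increments
 * are still parked in another CPU's delta.  percpu_counter_read() then
 * sees a negative central count even though the counter never logically
 * went below zero.
 */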

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
        return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
        s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                                      gfp_t gfp)
{
        fbc->count = amount;
        return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        if (fbc->count > rhs)
                return 1;
        else if (fbc->count < rhs)
                return -1;
        else
                return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
        return percpu_counter_compare(fbc, rhs);
}

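/*
 * No lock is needed on UP, but the read-modify-write of an s64 below can
 * take several instructions (e.g. on 32-bit), so a task that preempts us
 * on this CPU must not be allowed to interleave its own update; hence the
 * preempt_disable()/preempt_enable() pair.  Note that this window does
 * not mask interrupts: counters also updated from IRQ context need
 * separate protection from the caller.
 */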
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        preempt_disable();
        fbc->count += amount;
        preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
        return 1;
}

#endif  /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */