/*
 * linux/lib/percpu_counter.c
 *
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
        struct percpu_counter *fbc = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                percpu_counter_destroy(fbc);
                debug_object_free(fbc, &percpu_counter_debug_descr);
                return 1;
        default:
                return 0;
        }
}

static struct debug_obj_descr percpu_counter_debug_descr = {
        .name           = "percpu_counter",
        .fixup_free     = percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
        debug_object_init(fbc, &percpu_counter_debug_descr);
        debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
        debug_object_deactivate(fbc, &percpu_counter_debug_descr);
        debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else   /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif  /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;
        unsigned long flags;

        raw_spin_lock_irqsave(&fbc->lock, flags);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * Add @amount to the counter.  The delta is accumulated in this cpu's local
 * counter and only folded into fbc->count (under fbc->lock) once the local
 * value crosses +/-@batch, so the common case stays per-cpu and cheap.
 */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                unsigned long flags;
                raw_spin_lock_irqsave(&fbc->lock, flags);
                fbc->count += count;
                __this_cpu_sub(*fbc->counters, count - amount);
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        } else {
                this_cpu_add(*fbc->counters, amount);
        }
        preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
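
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * caller would use the batched add above.  "nr_example_items" and the
 * example_* functions are made-up names; the percpu_counter_inc/dec
 * helpers come from <linux/percpu_counter.h> and end up in
 * __percpu_counter_add() with the default batch.
 */
#if 0   /* example only */
static struct percpu_counter nr_example_items;

static void example_track_item(void)
{
        /* Cheap per-cpu increment; fbc->lock is only taken once the local
         * delta reaches +/-percpu_counter_batch. */
        percpu_counter_inc(&nr_example_items);
}

static void example_untrack_item(void)
{
        percpu_counter_dec(&nr_example_items);
}
#endif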

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;
        unsigned long flags;

        raw_spin_lock_irqsave(&fbc->lock, flags);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        raw_spin_unlock_irqrestore(&fbc->lock, flags);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
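
/*
 * Illustrative sketch, not part of the original file: the trade-off between
 * the cheap, approximate percpu_counter_read() and the exact but lock-taking
 * percpu_counter_sum().  "ex_counter" is a hypothetical, already-initialised
 * counter.
 */
#if 0   /* example only */
static s64 example_drift(struct percpu_counter *ex_counter)
{
        s64 approx = percpu_counter_read(ex_counter);   /* just fbc->count */
        s64 exact = percpu_counter_sum(ex_counter);     /* walks every cpu */

        /* How far the fast read had drifted from the true value. */
        return exact - approx;
}
#endif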

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                          struct lock_class_key *key)
{
        unsigned long flags __maybe_unused;

        raw_spin_lock_init(&fbc->lock);
        lockdep_set_class(&fbc->lock, key);
        fbc->count = amount;
        fbc->counters = alloc_percpu_gfp(s32, gfp);
        if (!fbc->counters)
                return -ENOMEM;

        debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        INIT_LIST_HEAD(&fbc->list);
        spin_lock_irqsave(&percpu_counters_lock, flags);
        list_add(&fbc->list, &percpu_counters);
        spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
        return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
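
/*
 * Illustrative sketch, not part of the original file: the usual
 * init/destroy pairing.  percpu_counter_init() is the wrapper macro from
 * <linux/percpu_counter.h> that supplies the lock_class_key and lands in
 * __percpu_counter_init() above.  The ex_* names are hypothetical.
 */
#if 0   /* example only */
static struct percpu_counter ex_nr_things;

static int example_setup(void)
{
        /* Returns -ENOMEM if the per-cpu allocation fails. */
        return percpu_counter_init(&ex_nr_things, 0, GFP_KERNEL);
}

static void example_teardown(void)
{
        percpu_counter_destroy(&ex_nr_things);
}
#endif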

void percpu_counter_destroy(struct percpu_counter *fbc)
{
        unsigned long flags __maybe_unused;

        if (!fbc->counters)
                return;

        debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        spin_lock_irqsave(&percpu_counters_lock, flags);
        list_del(&fbc->list);
        spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

/*
 * Refresh the default batch: at least 32, or twice the number of online
 * cpus, whichever is larger.
 */
static void compute_batch_value(void)
{
        int nr = num_online_cpus();

        percpu_counter_batch = max(32, nr*2);
}

/*
 * CPU hotplug callback: recompute the batch size and, when a cpu goes away,
 * fold that cpu's residual count from every registered counter back into
 * fbc->count so nothing is lost.
 */
static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unsigned int cpu;
        struct percpu_counter *fbc;

        compute_batch_value();
        if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        spin_lock_irq(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                raw_spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        }
        spin_unlock_irq(&percpu_counters_lock);
#endif
        return NOTIFY_OK;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
        s64     count;

        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
        if (abs(count - rhs) > (batch * num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
                        return -1;
        }
        /* Need to use precise count */
        count = percpu_counter_sum(fbc);
        if (count > rhs)
                return 1;
        else if (count < rhs)
                return -1;
        else
                return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
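
/*
 * Illustrative sketch, not part of the original file: using the comparison
 * helper to enforce a limit without always paying for a full sum.
 * percpu_counter_compare() is the <linux/percpu_counter.h> wrapper that
 * passes the default batch; "ex_used" and "ex_limit" are hypothetical.
 */
#if 0   /* example only */
static bool example_over_limit(struct percpu_counter *ex_used, s64 ex_limit)
{
        /* Falls back to percpu_counter_sum() only when the rough count is
         * within nr_cpus * batch of the limit. */
        return percpu_counter_compare(ex_used, ex_limit) > 0;
}
#endif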

static int __init percpu_counter_startup(void)
{
        compute_batch_value();
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);