linux/lib/percpu_counter.c
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
        struct percpu_counter *fbc = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                percpu_counter_destroy(fbc);
                debug_object_free(fbc, &percpu_counter_debug_descr);
                return 1;
        default:
                return 0;
        }
}

static struct debug_obj_descr percpu_counter_debug_descr = {
        .name           = "percpu_counter",
        .fixup_free     = percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
        debug_object_init(fbc, &percpu_counter_debug_descr);
        debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
        debug_object_deactivate(fbc, &percpu_counter_debug_descr);
        debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else   /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif  /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;

        raw_spin_lock(&fbc->lock);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        raw_spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                raw_spin_lock(&fbc->lock);
                fbc->count += count;
                raw_spin_unlock(&fbc->lock);
                __this_cpu_write(*fbc->counters, 0);
        } else {
                __this_cpu_write(*fbc->counters, count);
        }
        preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
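
/*
 * Usage sketch (illustrative, not part of this file): callers normally use
 * the percpu_counter_add()/percpu_counter_inc() wrappers from
 * <linux/percpu_counter.h>, which pass percpu_counter_batch as the batch.
 * Deltas accumulate in the local per-cpu slot, and fbc->lock is only taken
 * once that slot reaches +/- batch, so fbc->count may lag the true total by
 * up to (batch - 1) per cpu.  "nr_widgets" below is a made-up counter:
 *
 *	percpu_counter_inc(&nr_widgets);	- usually no lock taken
 *	percpu_counter_add(&nr_widgets, 16);	- folded into fbc->count only
 *						  when the batch limit is hit
 */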

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;

        raw_spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        raw_spin_unlock(&fbc->lock);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
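
/*
 * Usage sketch (illustrative, not part of this file; nr_widgets is a made-up
 * counter): percpu_counter_read() just returns fbc->count without locking
 * and may be stale by the per-cpu residues, while percpu_counter_sum() walks
 * every online cpu under fbc->lock for an accurate value:
 *
 *	s64 fast  = percpu_counter_read(&nr_widgets);	- O(1), approximate
 *	s64 exact = percpu_counter_sum(&nr_widgets);	- O(nr_cpus), accurate
 */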

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                          struct lock_class_key *key)
{
        raw_spin_lock_init(&fbc->lock);
        lockdep_set_class(&fbc->lock, key);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
        if (!fbc->counters)
                return -ENOMEM;

        debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        INIT_LIST_HEAD(&fbc->list);
        spin_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        spin_unlock(&percpu_counters_lock);
#endif
        return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
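
/*
 * Lifecycle sketch (illustrative, not part of this file; nr_widgets is a
 * made-up counter): callers normally go through the percpu_counter_init()
 * wrapper in <linux/percpu_counter.h>, which supplies the lockdep class key,
 * and must check the return value since the per-cpu storage is allocated
 * here:
 *
 *	int err = percpu_counter_init(&nr_widgets, 0);
 *	if (err)
 *		return err;			- alloc_percpu() failed
 *	...
 *	percpu_counter_destroy(&nr_widgets);	- frees the per-cpu storage
 */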

void percpu_counter_destroy(struct percpu_counter *fbc)
{
        if (!fbc->counters)
                return;

        debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        spin_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        spin_unlock(&percpu_counters_lock);
#endif
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
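
/*
 * Worked example (assumes callers use the default batch): with 64 cpus
 * online, compute_batch_value() below sets the batch to max(32, 64*2) = 128,
 * each cpu may then hold a residue of at most +/-127, and
 * percpu_counter_read() can differ from the exact sum by just under
 * 128 * 64 = 8192.  percpu_counter_compare() relies on the same
 * batch * num_online_cpus() bound when deciding whether the cheap read is
 * good enough.
 */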

static void compute_batch_value(void)
{
        int nr = num_online_cpus();

        percpu_counter_batch = max(32, nr*2);
}

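/*
 * CPU hotplug notifier: recompute the batch for the new number of online
 * cpus and, when a cpu goes away, fold its leftover per-cpu residue for
 * every registered counter back into fbc->count so nothing is lost.
 */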
static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unsigned int cpu;
        struct percpu_counter *fbc;

        compute_batch_value();
        if (action != CPU_DEAD)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        spin_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                raw_spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        }
        spin_unlock(&percpu_counters_lock);
#endif
        return NOTIFY_OK;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        s64     count;

        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
        if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
                        return -1;
        }
        /* Need to use precise count */
        count = percpu_counter_sum(fbc);
        if (count > rhs)
                return 1;
        else if (count < rhs)
                return -1;
        else
                return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);
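
/*
 * Usage sketch (illustrative, not part of this file; nr_widgets and
 * widget_limit are made up): a typical caller is a cheap limit check:
 *
 *	if (percpu_counter_compare(&nr_widgets, widget_limit) >= 0)
 *		return -ENOSPC;		- at or over the limit
 *
 * The approximate percpu_counter_read() value is trusted only when it is
 * more than batch * num_online_cpus() away from the limit; otherwise the
 * precise percpu_counter_sum() is used.
 */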

static int __init percpu_counter_startup(void)
{
        compute_batch_value();
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);
