linux/lib/percpu_counter.c
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

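/*
 * Each counter keeps one central s64 (fbc->count) plus a small signed
 * 32-bit delta per CPU.  Updates normally touch only the local delta;
 * once a delta reaches +/- batch it is folded into fbc->count under
 * fbc->lock.  A plain read of fbc->count is therefore approximate, off
 * by at most about (batch - 1) per online CPU.
 */
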
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);

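/*
 * Install an absolute value: zero every per-CPU delta and set the central
 * count to @amount, all under fbc->lock.
 */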
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;

        spin_lock(&fbc->lock);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

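/*
 * Add @amount to the local CPU's delta and spill into the central count
 * once the delta reaches +/- @batch.  Most callers go through the
 * percpu_counter_add() wrapper (a static inline in the percpu_counter.h
 * header, which passes percpu_counter_batch as @batch) rather than
 * calling this directly.
 */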
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;
        s32 *pcount;
        int cpu = get_cpu();

        pcount = per_cpu_ptr(fbc->counters, cpu);
        count = *pcount + amount;
        if (count >= batch || count <= -batch) {
                spin_lock(&fbc->lock);
                fbc->count += count;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        } else {
                *pcount = count;
        }
        put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;

        spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        spin_unlock(&fbc->lock);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

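/*
 * Set up the central count and allocate the per-CPU deltas.  Callers
 * normally use the percpu_counter_init() wrapper from the header, which
 * supplies the lockdep class key.  Returns -ENOMEM if the per-CPU
 * allocation fails.
 */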
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                          struct lock_class_key *key)
{
        spin_lock_init(&fbc->lock);
        lockdep_set_class(&fbc->lock, key);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
        if (!fbc->counters)
                return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        mutex_unlock(&percpu_counters_lock);
#endif
        return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

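/*
 * Tear down a counter: unhook it from the hotplug list and free the
 * per-CPU storage.  Safe to call on a counter whose init failed, since a
 * NULL fbc->counters is simply ignored.
 */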
void percpu_counter_destroy(struct percpu_counter *fbc)
{
        if (!fbc->counters)
                return;

#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        mutex_unlock(&percpu_counters_lock);
#endif
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
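
/*
 * Typical use, as a rough sketch.  percpu_counter_init(), _add(), _read()
 * and _sum() are the wrappers/inlines from the percpu_counter.h header;
 * "nr_things" is just an example name:
 *
 *      struct percpu_counter nr_things;
 *      s64 approx, exact;
 *
 *      percpu_counter_init(&nr_things, 0);
 *      percpu_counter_add(&nr_things, 1);        // per-CPU fast path
 *      approx = percpu_counter_read(&nr_things); // cheap, within batch slack
 *      exact = percpu_counter_sum(&nr_things);   // folds in per-CPU deltas
 *      percpu_counter_destroy(&nr_things);
 */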

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

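/*
 * percpu_counter_batch is the spill threshold used by the header's
 * percpu_counter_add() wrapper.  It is rescaled as CPUs come and go:
 * at least 32, and twice the number of online CPUs on larger machines,
 * trading per-CPU slack for fewer contended updates of the shared count.
 */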
static void compute_batch_value(void)
{
        int nr = num_online_cpus();

        percpu_counter_batch = max(32, nr*2);
}

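/*
 * CPU hotplug callback: recompute the batch for the new CPU count, and on
 * CPU_DEAD fold the dead CPU's leftover delta into the central count for
 * every registered counter, so nothing is lost when a CPU goes away.
 */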
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unsigned int cpu;
        struct percpu_counter *fbc;

        compute_batch_value();
        if (action != CPU_DEAD)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        mutex_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                spin_unlock_irqrestore(&fbc->lock, flags);
        }
        mutex_unlock(&percpu_counters_lock);
#endif
        return NOTIFY_OK;
}

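/*
 * Boot-time setup: pick the initial batch value and register the hotplug
 * notifier.  This file is built into the kernel, so module_init() here
 * just runs as a normal initcall during boot.
 */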
static int __init percpu_counter_startup(void)
{
        compute_batch_value();
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);