linux/net/core/gen_estimator.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/gen_estimator.c    Simple rate estimator.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Eric Dumazet <edumazet@google.com>
 *
 * Changes:
 *              Jamal Hadi Salim - moved it to net/core and reshuffled
 *              names to make it usable in general net subsystem.
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <net/sock.h>
#include <net/gen_stats.h>

/* This code is NOT intended to be used for statistics collection;
 * its purpose is to provide a base for statistical multiplexing
 * for controlled load service.
 * If you need only statistics, run a user level daemon which
 * periodically reads byte counters.
 */

struct net_rate_estimator {
        struct gnet_stats_basic_packed  *bstats;
        spinlock_t              *stats_lock;
        seqcount_t              *running;
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        u8                      ewma_log;
        u8                      intvl_log; /* period : (250ms << intvl_log) */

        seqcount_t              seq;
        u32                     last_packets;
        u64                     last_bytes;

        u64                     avpps;
        u64                     avbps;

        unsigned long           next_jiffies;
        struct timer_list       timer;
        struct rcu_head         rcu;
};
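
/* avbps and avpps hold the estimated rates scaled by 2^8;
 * gen_estimator_read() shifts them right by 8 before reporting bytes/sec
 * and packets/sec. One sampling period spans (250ms << intvl_log),
 * i.e. 250ms up to 8 seconds.
 */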

static void est_fetch_counters(struct net_rate_estimator *e,
                               struct gnet_stats_basic_packed *b)
{
        memset(b, 0, sizeof(*b));
        if (e->stats_lock)
                spin_lock(e->stats_lock);

        __gnet_stats_copy_basic(e->running, b, e->cpu_bstats, e->bstats);

        if (e->stats_lock)
                spin_unlock(e->stats_lock);
}

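/* est_timer() runs once per (HZ / 4) << intvl_log jiffies. The byte and
 * packet deltas accumulated over that period are converted to 2^8-scaled
 * rates, delta << (10 - intvl_log) == (delta * 4 >> intvl_log) << 8, and
 * folded into the averages with an EWMA:
 *
 *      avbps += (scaled_rate - avbps) >> ewma_log
 *
 * Below, the >> ewma_log is applied to each term separately, which is why
 * the delta is shifted by (10 - ewma_log - intvl_log) and
 * (avbps >> ewma_log) is subtracted.
 */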
static void est_timer(struct timer_list *t)
{
        struct net_rate_estimator *est = from_timer(est, t, timer);
        struct gnet_stats_basic_packed b;
        u64 rate, brate;

        est_fetch_counters(est, &b);
        brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
        brate -= (est->avbps >> est->ewma_log);

        rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
        rate -= (est->avpps >> est->ewma_log);

        write_seqcount_begin(&est->seq);
        est->avbps += brate;
        est->avpps += rate;
        write_seqcount_end(&est->seq);

        est->last_bytes = b.bytes;
        est->last_packets = b.packets;

        est->next_jiffies += ((HZ/4) << est->intvl_log);

        if (unlikely(time_after_eq(jiffies, est->next_jiffies))) {
                /* Ouch... timer was delayed. */
                est->next_jiffies = jiffies + 1;
        }
        mod_timer(&est->timer, est->next_jiffies);
}

/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @lock: lock for statistics and control path
 * @running: qdisc running seqcount
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
                      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                      struct net_rate_estimator __rcu **rate_est,
                      spinlock_t *lock,
                      seqcount_t *running,
                      struct nlattr *opt)
{
        struct gnet_estimator *parm = nla_data(opt);
        struct net_rate_estimator *old, *est;
        struct gnet_stats_basic_packed b;
        int intvl_log;

        if (nla_len(opt) < sizeof(*parm))
                return -EINVAL;

        /* allowed timer periods are:
         * -2 : 250ms,   -1 : 500ms,    0 : 1 sec
         *  1 : 2 sec,    2 : 4 sec,    3 : 8 sec
         */
        if (parm->interval < -2 || parm->interval > 3)
                return -EINVAL;

        est = kzalloc(sizeof(*est), GFP_KERNEL);
        if (!est)
                return -ENOBUFS;

        seqcount_init(&est->seq);
        intvl_log = parm->interval + 2;
        est->bstats = bstats;
        est->stats_lock = lock;
        est->running  = running;
        est->ewma_log = parm->ewma_log;
        est->intvl_log = intvl_log;
        est->cpu_bstats = cpu_bstats;

        if (lock)
                local_bh_disable();
        est_fetch_counters(est, &b);
        if (lock)
                local_bh_enable();
        est->last_bytes = b.bytes;
        est->last_packets = b.packets;

        if (lock)
                spin_lock_bh(lock);
        old = rcu_dereference_protected(*rate_est, 1);
        if (old) {
                del_timer_sync(&old->timer);
                est->avbps = old->avbps;
                est->avpps = old->avpps;
        }

        est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
        timer_setup(&est->timer, est_timer, 0);
        mod_timer(&est->timer, est->next_jiffies);

        rcu_assign_pointer(*rate_est, est);
        if (lock)
                spin_unlock_bh(lock);
        if (old)
                kfree_rcu(old, rcu);
        return 0;
}
EXPORT_SYMBOL(gen_new_estimator);

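/* Illustrative (hypothetical) caller, loosely modeled on a qdisc setup path;
 * sch, running and tca are placeholders, not names defined in this file:
 *
 *	err = gen_new_estimator(&sch->bstats, sch->cpu_bstats, &sch->rate_est,
 *				NULL, running, tca[TCA_RATE]);
 *	if (err)
 *		return err;
 *
 * A non-NULL @lock is taken by est_fetch_counters() around every counter
 * snapshot and, in gen_new_estimator(), around the swap of the old
 * estimator; callers whose counters are protected by a seqcount pass it
 * as @running instead.
 */
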
/**
 * gen_kill_estimator - remove a rate estimator
 * @rate_est: rate estimator
 *
 * Removes the rate estimator.
 */
void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
{
        struct net_rate_estimator *est;

        est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
        if (est) {
                del_timer_sync(&est->timer);
                kfree_rcu(est, rcu);
        }
}
EXPORT_SYMBOL(gen_kill_estimator);
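
/* Matching teardown for the hypothetical caller above:
 *
 *	gen_kill_estimator(&sch->rate_est);
 *
 * xchg() detaches the estimator, del_timer_sync() stops the sampling timer,
 * and the struct is freed after an RCU grace period via kfree_rcu().
 */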

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @lock: lock for statistics and control path
 * @running: qdisc running seqcount (might be NULL)
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_new_estimator(), which itself removes and replaces any
 * estimator already attached to @rate_est.
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
                          struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                          struct net_rate_estimator __rcu **rate_est,
                          spinlock_t *lock,
                          seqcount_t *running, struct nlattr *opt)
{
        return gen_new_estimator(bstats, cpu_bstats, rate_est,
                                 lock, running, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @rate_est: rate estimator
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(struct net_rate_estimator __rcu **rate_est)
{
        return !!rcu_access_pointer(*rate_est);
}
EXPORT_SYMBOL(gen_estimator_active);

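/**
 * gen_estimator_read - read the current rate estimate
 * @rate_est: rate estimator
 * @sample: destination for the bytes/sec and packets/sec estimates
 *
 * Returns true and fills @sample if an estimator is attached to @rate_est,
 * false otherwise.
 */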
bool gen_estimator_read(struct net_rate_estimator __rcu **rate_est,
                        struct gnet_stats_rate_est64 *sample)
{
        struct net_rate_estimator *est;
        unsigned int seq;

        rcu_read_lock();
        est = rcu_dereference(*rate_est);
        if (!est) {
                rcu_read_unlock();
                return false;
        }

        do {
                seq = read_seqcount_begin(&est->seq);
                sample->bps = est->avbps >> 8;
                sample->pps = est->avpps >> 8;
        } while (read_seqcount_retry(&est->seq, seq));

        rcu_read_unlock();
        return true;
}
EXPORT_SYMBOL(gen_estimator_read);
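
/* Illustrative (hypothetical) reader, e.g. when dumping statistics; sch is
 * again a placeholder:
 *
 *	struct gnet_stats_rate_est64 sample;
 *
 *	if (gen_estimator_read(&sch->rate_est, &sample))
 *		pr_debug("rate: %llu bytes/s, %llu pkts/s\n",
 *			 sample.bps, sample.pps);
 *
 * The read_seqcount_begin()/read_seqcount_retry() loop guarantees a
 * consistent bps/pps pair even if est_timer() updates the averages
 * concurrently.
 */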