linux/net/ipv4/inetpeer.c
/*
 *              INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:    Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-lived
 *  information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  Less-recently-used entries can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  This implementation was chosen deliberately: it prevents easy and
 *  efficient DoS attacks based on crafted hash collisions.  A huge
 *  number of long-lived nodes in a single hash slot would significantly delay
 *  lookups performed with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND their reference count being 0.
 *  3.  base->total is modified only under the pool lock.
 *  4.  struct inet_peer fields modification:
 *              rb_node: pool lock
 *              refcnt: atomically against modifications on other CPUs;
 *                 usually under some other lock to prevent node disappearing
 *              daddr: unchangeable
 */

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
        bp->rb_root = RB_ROOT;
        seqlock_init(&bp->lock);
        bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);
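
/* Example (editor's sketch, not part of the original file): a hypothetical
 * owner embedding a peer base.  In the kernel the bases are typically owned
 * by per-netns structures; the struct and function below are illustrative
 * only.
 */
struct example_peer_owner {
        struct inet_peer_base   base;
};

static void __maybe_unused example_peer_owner_init(struct example_peer_owner *o)
{
        inet_peer_base_init(&o->base);  /* empty RB tree, seqlock initialised */
}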

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4 as net.ipv4.inet_peer_threshold,
 * net.ipv4.inet_peer_minttl and net.ipv4.inet_peer_maxttl (the ttl values
 * are set in seconds from userspace).
 */
int inet_peer_threshold __read_mostly = 65536 + 128;    /* start to throw entries more
                                         * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;  /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;      /* usual time to live: 10 min */

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
        struct sysinfo si;

        /* Query memory information directly. */
        si_meminfo(&si);
        /* The values below were suggested by Alexey Kuznetsov
         * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
         * myself.  --SAW
         */
        if (si.totalram <= (32768*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
        if (si.totalram <= (16384*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 1; /* about 512KB */
        if (si.totalram <= (8192*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 2; /* about 128KB */

        peer_cachep = kmem_cache_create("inet_peer_cache",
                        sizeof(struct inet_peer),
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
                        NULL);
}
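
/* Worked example (editor's note): the memory checks above are cumulative.
 * With the default threshold of 65536 + 128 = 65664 entries, a machine with
 * <= 16 MB of RAM matches the first two tests, so the threshold becomes
 * 65664 >> 2 = 16416; a machine with <= 8 MB matches all three and ends at
 * 65664 >> 4 = 4104 entries.
 */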

/* Called with rcu_read_lock() or base->lock held.
 * On the lockless path (gc_stack == NULL), the walk is validated against
 * base->lock with read_seqretry(); under the write lock, every node
 * traversed is stacked so that the caller may garbage collect it.
 */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
                                struct inet_peer_base *base,
                                unsigned int seq,
                                struct inet_peer *gc_stack[],
                                unsigned int *gc_cnt,
                                struct rb_node **parent_p,
                                struct rb_node ***pp_p)
{
        struct rb_node **pp, *parent, *next;
        struct inet_peer *p;

        pp = &base->rb_root.rb_node;
        parent = NULL;
        while (1) {
                int cmp;

                next = rcu_dereference_raw(*pp);
                if (!next)
                        break;
                parent = next;
                p = rb_entry(parent, struct inet_peer, rb_node);
                cmp = inetpeer_addr_cmp(daddr, &p->daddr);
                if (cmp == 0) {
                        if (!refcount_inc_not_zero(&p->refcnt))
                                break;
                        return p;
                }
                if (gc_stack) {
                        if (*gc_cnt < PEER_MAX_GC)
                                gc_stack[(*gc_cnt)++] = p;
                } else if (unlikely(read_seqretry(&base->lock, seq))) {
                        break;
                }
                if (cmp == -1)
                        pp = &next->rb_left;
                else
                        pp = &next->rb_right;
        }
        *parent_p = parent;
        *pp_p = pp;
        return NULL;
}

/* Freeing is RCU-deferred: lockless readers may still be traversing a node
 * that has just been erased from the tree.
 */
static void inetpeer_free_rcu(struct rcu_head *head)
{
        kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* Perform garbage collection on all items stacked during a lookup. */
static void inet_peer_gc(struct inet_peer_base *base,
                         struct inet_peer *gc_stack[],
                         unsigned int gc_cnt)
{
        struct inet_peer *p;
        __u32 delta, ttl;
        int i;

        if (base->total >= inet_peer_threshold)
                ttl = 0; /* be aggressive */
        else
                /* Interpolate linearly between inet_peer_maxttl (empty pool)
                 * and inet_peer_minttl (pool at the threshold).
                 */
                ttl = inet_peer_maxttl
                                - (inet_peer_maxttl - inet_peer_minttl) / HZ *
                                        base->total / inet_peer_threshold * HZ;
        for (i = 0; i < gc_cnt; i++) {
                p = gc_stack[i];
                delta = (__u32)jiffies - p->dtime;
                /* Keep entries that are young enough or still referenced;
                 * refcount_dec_if_one() only succeeds when the tree holds
                 * the last reference.
                 */
                if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
                        gc_stack[i] = NULL;
        }
        for (i = 0; i < gc_cnt; i++) {
                p = gc_stack[i];
                if (p) {
                        rb_erase(&p->rb_node, &base->rb_root);
                        base->total--;
                        call_rcu(&p->rcu, inetpeer_free_rcu);
                }
        }
}
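
/* Worked example (editor's note): with the defaults above, maxttl is 600*HZ
 * and minttl is 120*HZ, so (maxttl - minttl) / HZ == 480.  With the pool half
 * full, 480 * total / threshold == 240, hence ttl == 600*HZ - 240*HZ ==
 * 360*HZ: entries idle for more than six minutes become eligible for gc.
 */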

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
                               const struct inetpeer_addr *daddr,
                               int create)
{
        struct inet_peer *p, *gc_stack[PEER_MAX_GC];
        struct rb_node **pp, *parent;
        unsigned int gc_cnt, seq;
        int invalidated;

        /* Attempt a lockless lookup first.
         * Because of a concurrent writer, we might not find an existing entry.
         */
        rcu_read_lock();
        seq = read_seqbegin(&base->lock);
        p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
        invalidated = read_seqretry(&base->lock, seq);
        rcu_read_unlock();

        if (p)
                return p;

        /* If no writer did a change during our lookup, we can return early. */
        if (!create && !invalidated)
                return NULL;

        /* Retry an exact lookup, this time taking the lock first.
         * At least the nodes should be hot in our cache.
         */
        parent = NULL;
        write_seqlock_bh(&base->lock);

        gc_cnt = 0;
        p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
        if (!p && create) {
                p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
                if (p) {
                        p->daddr = *daddr;
                        p->dtime = (__u32)jiffies;
                        /* One reference for the tree, one for the caller. */
                        refcount_set(&p->refcnt, 2);
                        atomic_set(&p->rid, 0);
                        p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
                        p->rate_tokens = 0;
                        /* 60*HZ is arbitrary, but chosen high enough so that
                         * the first calculation of tokens is at its maximum.
                         */
                        p->rate_last = jiffies - 60*HZ;

                        rb_link_node(&p->rb_node, parent, pp);
                        rb_insert_color(&p->rb_node, &base->rb_root);
                        base->total++;
                }
        }
        if (gc_cnt)
                inet_peer_gc(base, gc_stack, gc_cnt);
        write_sequnlock_bh(&base->lock);

        return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
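
/* Example (editor's sketch, not part of the original file): a hypothetical
 * caller pairing inet_getpeer() with inet_putpeer().  The function name and
 * its use are illustrative only.
 */
static void __maybe_unused example_touch_peer(struct inet_peer_base *base,
                                              const struct inetpeer_addr *daddr)
{
        struct inet_peer *peer;

        peer = inet_getpeer(base, daddr, 1);    /* create entry if missing */
        if (!peer)
                return;                         /* allocation failed */
        /* ... use per-peer state, e.g. peer->rate_tokens ... */
        inet_putpeer(peer);                     /* drop the caller's reference */
}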

void inet_putpeer(struct inet_peer *p)
{
        /* Record the time of last use for the gc ttl check. */
        p->dtime = (__u32)jiffies;

        if (refcount_dec_and_test(&p->refcnt))
                call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *      Check transmit rate limitation for a given message.
 *      The rate information is held in the inet_peer entries now.
 *      This function is generic and could be used for other purposes
 *      too.  It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *      Note that the same inet_peer fields are modified by functions in
 *      route.c too, but these work for packet destinations while xrlim_allow
 *      works for icmp destinations.  This means the rate limiting information
 *      for one "ip object" is shared - and these ICMPs are twice limited:
 *      by source and by destination.
 *
 *      RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *                        SHOULD allow setting of rate limits
 *
 *      Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
        unsigned long now, token;
        bool rc = false;

        if (!peer)
                return true;

        token = peer->rate_tokens;
        now = jiffies;
        token += now - peer->rate_last;         /* refill: one token per jiffy */
        peer->rate_last = now;
        if (token > XRLIM_BURST_FACTOR * timeout)
                token = XRLIM_BURST_FACTOR * timeout;   /* cap the burst */
        if (token >= timeout) {
                token -= timeout;               /* spend one message's worth */
                rc = true;
        }
        peer->rate_tokens = token;
        return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
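
/* Worked example (editor's note): with timeout == 1*HZ a peer accrues one
 * message credit per second, capped at XRLIM_BURST_FACTOR (6) seconds' worth.
 * An idle peer may therefore burst up to six messages back to back, after
 * which it is limited to one per second.  A hypothetical caller:
 */
static bool __maybe_unused example_icmp_allowed(struct inet_peer *peer)
{
        /* The one-second interval here is illustrative; real callers pass a
         * sysctl-controlled value.
         */
        return inet_peer_xrlim_allow(peer, 1 * HZ);
}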

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
        struct rb_node *p = rb_first(&base->rb_root);

        while (p) {
                struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

                p = rb_next(p);
                rb_erase(&peer->rb_node, &base->rb_root);
                inet_putpeer(peer);
                cond_resched();
        }

        base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);