linux/net/core/flow.c
/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>

struct flow_cache_entry {
        union {
                struct hlist_node       hlist;
                struct list_head        gc_list;
        } u;
        struct net                      *net;
        u16                             family;
        u8                              dir;
        u32                             genid;
        struct flowi                    key;
        struct flow_cache_object        *object;
};

struct flow_flush_info {
        struct flow_cache               *cache;
        atomic_t                        cpuleft;
        struct completion               completion;
};

static struct kmem_cache *flow_cachep __read_mostly;

#define flow_cache_hash_size(cache)     (1U << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD            (10 * 60 * HZ)

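/*
 * Sizing note (illustrative, using the defaults set in flow_cache_init()
 * below): hash_shift = 10 gives flow_cache_hash_size() = 1 << 10 = 1024
 * buckets per cpu, and FLOW_HASH_RND_PERIOD = 10 * 60 * HZ jiffies, i.e.
 * the per-cpu hash seed is marked for renewal every ten minutes.
 */
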
static void flow_cache_new_hashrnd(unsigned long arg)
{
        struct flow_cache *fc = (void *) arg;
        int i;

        for_each_possible_cpu(i)
                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);
}

static int flow_entry_valid(struct flow_cache_entry *fle,
                                struct netns_xfrm *xfrm)
{
        if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle,
                                struct netns_xfrm *xfrm)
{
        if (fle->object)
                fle->object->ops->delete(fle->object);
        kmem_cache_free(flow_cachep, fle);
}

static void flow_cache_gc_task(struct work_struct *work)
{
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;
        struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
                                                flow_cache_gc_work);

        INIT_LIST_HEAD(&gc_list);
        spin_lock_bh(&xfrm->flow_cache_gc_lock);
        list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&xfrm->flow_cache_gc_lock);

        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
                flow_entry_kill(fce, xfrm);
                atomic_dec(&xfrm->flow_cache_gc_count);
        }
}

static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     unsigned int deleted,
                                     struct list_head *gc_list,
                                     struct netns_xfrm *xfrm)
{
        if (deleted) {
                atomic_add(deleted, &xfrm->flow_cache_gc_count);
                fcp->hash_count -= deleted;
                spin_lock_bh(&xfrm->flow_cache_gc_lock);
                list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
                spin_unlock_bh(&xfrm->flow_cache_gc_lock);
                schedule_work(&xfrm->flow_cache_gc_work);
        }
}

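/*
 * Note on the GC handoff above: callers of flow_cache_queue_garbage() unhash
 * entries from a per-cpu table and pass them in on a private list; the
 * entries are spliced onto the per-netns flow_cache_gc_list and actually
 * freed by flow_cache_gc_task() in process context, which keeps the
 * ->delete() callbacks on cached objects out of softirq context.
 */
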
static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
                                unsigned int shrink_to)
{
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        unsigned int deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                                flow_cache_global);
        unsigned int i;

        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                unsigned int saved = 0;

                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle, xfrm)) {
                                saved++;
                        } else {
                                deleted++;
                                hlist_del(&fle->u.hlist);
                                list_add_tail(&fle->u.gc_list, &gc_list);
                        }
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}

static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        unsigned int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

        __flow_cache_shrink(fc, fcp, shrink_to);
}

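/*
 * Worked example for flow_cache_shrink() above (values are the defaults
 * from flow_cache_init(), not a guarantee): low_watermark = 2 * 1024 and
 * flow_cache_hash_size() = 1024, so shrink_to = 2048 / 1024 = 2; at most
 * two still-valid entries are kept per bucket and the rest are queued for
 * the GC worker.
 */
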
static void flow_new_hash_rnd(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
        fcp->hash_rnd_recalc = 0;
        __flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          const struct flowi *key,
                          unsigned int keysize)
{
        const u32 *k = (const u32 *) key;
        const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

        return jhash2(k, length, fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
}

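/*
 * Since flow_cache_hash_size() is always a power of two, the mask used in
 * flow_hash_code() above is equivalent to (but cheaper than) a modulo,
 * e.g. with the default 1024 buckets:
 *
 *      hash & (1024 - 1) == hash % 1024
 */
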
/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
                            unsigned int keysize)
{
        const flow_compare_t *k1, *k1_lim, *k2;

        k1 = (const flow_compare_t *) key1;
        k1_lim = k1 + keysize;

        k2 = (const flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}

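/*
 * For reference, the word-wise loop above behaves like the (slower,
 * alignment-agnostic) expression below; keysize is measured in
 * flow_compare_t units, so the byte count has to be scaled:
 *
 *      memcmp(key1, key2, keysize * sizeof(flow_compare_t)) != 0
 */
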
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
{
        struct flow_cache *fc = &net->xfrm.flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct flow_cache_object *flo;
        unsigned int keysize;
        unsigned int hash;

        local_bh_disable();
        fcp = this_cpu_ptr(fc->percpu);

        fle = NULL;
        flo = NULL;

        keysize = flow_key_size(family);
        if (!keysize)
                goto nocache;

        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!fcp->hash_table)
                goto nocache;

        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);

        hash = flow_hash_code(fc, fcp, key, keysize);
        hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
                if (tfle->net == net &&
                    tfle->family == family &&
                    tfle->dir == dir &&
                    flow_key_compare(key, &tfle->key, keysize) == 0) {
                        fle = tfle;
                        break;
                }
        }

        if (unlikely(!fle)) {
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);

                if (atomic_read(&net->xfrm.flow_cache_gc_count) >
                    2 * num_online_cpus() * fc->high_watermark) {
                        flo = ERR_PTR(-ENOBUFS);
                        goto ret_object;
                }

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->net = net;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
        } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
                flo = flo->ops->get(flo);
                if (flo)
                        goto ret_object;
        } else if (fle->object) {
                flo = fle->object;
                flo->ops->delete(flo);
                fle->object = NULL;
        }

nocache:
        flo = NULL;
        if (fle) {
                flo = fle->object;
                fle->object = NULL;
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
                fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
                        fle->genid--;
        } else {
                if (!IS_ERR_OR_NULL(flo))
                        flo->ops->delete(flo);
        }
ret_object:
        local_bh_enable();
        return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

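/*
 * Usage sketch for flow_cache_lookup() (illustrative only; example_resolver
 * and example_slow_path are hypothetical names, not part of this file).
 * The resolver is handed the stale cached object, if any, and returns the
 * object to cache, an ERR_PTR() on failure, or NULL:
 *
 *      static struct flow_cache_object *
 *      example_resolver(struct net *net, const struct flowi *key, u16 family,
 *                       u8 dir, struct flow_cache_object *old_obj, void *ctx)
 *      {
 *              if (old_obj)
 *                      old_obj->ops->delete(old_obj);
 *              return example_slow_path(net, key, family, dir, ctx);
 *      }
 *
 *      struct flow_cache_object *flo;
 *
 *      flo = flow_cache_lookup(net, &fl, family, dir, example_resolver, NULL);
 *      if (IS_ERR(flo))
 *              return PTR_ERR(flo);
 */
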
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        unsigned int deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                                flow_cache_global);
        unsigned int i;

        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle, xfrm))
                                continue;

                        deleted++;
                        hlist_del(&fle->u.hlist);
                        list_add_tail(&fle->u.gc_list, &gc_list);
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}

/*
 * Return whether a cpu's flow cache is empty and therefore needs no
 * flushing.  Conservatively, we assume the presence of any entries means
 * the core may require flushing, since the flow_cache_ops.check() function
 * may assume it's running on the same core as the per-cpu cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp;
        unsigned int i;

        fcp = per_cpu_ptr(fc->percpu, cpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++)
                if (!hlist_empty(&fcp->hash_table[i]))
                        return 0;
        return 1;
}

static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        struct tasklet_struct *tasklet;

        tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}

void flow_cache_flush(struct net *net)
{
        struct flow_flush_info info;
        cpumask_var_t mask;
        int i, self;

        /* Track which cpus need flushing to avoid disturbing all cores. */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return;
        cpumask_clear(mask);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&net->xfrm.flow_flush_sem);
        info.cache = &net->xfrm.flow_cache_global;
        for_each_online_cpu(i)
                if (!flow_cache_percpu_empty(info.cache, i))
                        cpumask_set_cpu(i, mask);
        atomic_set(&info.cpuleft, cpumask_weight(mask));
        if (atomic_read(&info.cpuleft) == 0)
                goto done;

        init_completion(&info.completion);

        local_bh_disable();
        self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
        on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
        if (self)
                flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);

done:
        mutex_unlock(&net->xfrm.flow_flush_sem);
        put_online_cpus();
        free_cpumask_var(mask);
}

static void flow_cache_flush_task(struct work_struct *work)
{
        struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
                                                flow_cache_flush_work);
        struct net *net = container_of(xfrm, struct net, xfrm);

        flow_cache_flush(net);
}

void flow_cache_flush_deferred(struct net *net)
{
        schedule_work(&net->xfrm.flow_cache_flush_work);
}

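/*
 * Illustrative call pattern for the flush interfaces above (a sketch of how
 * a caller such as the xfrm code might invalidate the cache, not a
 * definition of its API): bump the genid so cached entries fail
 * flow_entry_valid(), then flush synchronously from process context, or
 * defer to the workqueue when sleeping is not allowed:
 *
 *      atomic_inc(&net->xfrm.flow_cache_genid);
 *      if (can_sleep)                          // hypothetical condition
 *              flow_cache_flush(net);
 *      else
 *              flow_cache_flush_deferred(net);
 */
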
static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
        unsigned int sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

        if (!fcp->hash_table) {
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!fcp->hash_table) {
                        pr_err("NET: failed to allocate flow cache sz %u\n", sz);
                        return -ENOMEM;
                }
                fcp->hash_rnd_recalc = 1;
                fcp->hash_count = 0;
                tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
        }
        return 0;
}

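/*
 * Sizing example for flow_cache_cpu_prepare() above (illustrative, assuming
 * the default hash_shift = 10 and an 8-byte struct hlist_head, e.g. on a
 * 64-bit build): sz = 1024 * 8 = 8 KiB of bucket heads per cpu, allocated
 * from that cpu's NUMA node; the flow_cache_entry objects themselves come
 * from flow_cachep as flows are cached.
 */
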
static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
{
        struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);

        return flow_cache_cpu_prepare(fc, cpu);
}

static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
        struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

        __flow_cache_shrink(fc, fcp, 0);
        return 0;
}

int flow_cache_init(struct net *net)
{
        int i;
        struct flow_cache *fc = &net->xfrm.flow_cache_global;

        if (!flow_cachep)
                flow_cachep = kmem_cache_create("flow_cache",
                                                sizeof(struct flow_cache_entry),
                                                0, SLAB_PANIC, NULL);
        spin_lock_init(&net->xfrm.flow_cache_gc_lock);
        INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
        INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
        INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
        mutex_init(&net->xfrm.flow_flush_sem);
        atomic_set(&net->xfrm.flow_cache_gc_count, 0);

        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
        fc->high_watermark = 4 * flow_cache_hash_size(fc);

        fc->percpu = alloc_percpu(struct flow_cache_percpu);
        if (!fc->percpu)
                return -ENOMEM;

        if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
                goto err;

        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);

        return 0;

err:
        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        free_percpu(fc->percpu);
        fc->percpu = NULL;

        return -ENOMEM;
}
EXPORT_SYMBOL(flow_cache_init);

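/*
 * Worked example of the defaults chosen in flow_cache_init() above: with
 * hash_shift = 10 there are 1024 buckets per cpu, low_watermark = 2048 and
 * high_watermark = 4096.  A lookup that misses on a cpu holding more than
 * 4096 entries first shrinks that cpu's table back toward two valid entries
 * per bucket before allocating a new flow_cache_entry.
 */
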
void flow_cache_fini(struct net *net)
{
        int i;
        struct flow_cache *fc = &net->xfrm.flow_cache_global;

        del_timer_sync(&fc->rnd_timer);

        cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);

        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        free_percpu(fc->percpu);
        fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);

void __init flow_cache_hp_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
                                      "net/flow:prepare",
                                      flow_cache_cpu_up_prep,
                                      flow_cache_cpu_dead);
        WARN_ON(ret < 0);
}
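
/*
 * Lifecycle sketch (illustrative; the callers live outside this file):
 * cpuhp_setup_state_multi() must run before any instance is registered
 * with cpuhp_state_add_instance(), so the expected ordering is
 *
 *      flow_cache_hp_init();           // once at boot, registers the cpuhp state
 *      ...
 *      flow_cache_init(net);           // per network namespace
 *      ...
 *      flow_cache_fini(net);           // on namespace teardown
 */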