linux/net/openvswitch/flow_table.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2007-2014 Nicira, Inc.
   4 */
   5
   6#include "flow.h"
   7#include "datapath.h"
   8#include "flow_netlink.h"
   9#include <linux/uaccess.h>
  10#include <linux/netdevice.h>
  11#include <linux/etherdevice.h>
  12#include <linux/if_ether.h>
  13#include <linux/if_vlan.h>
  14#include <net/llc_pdu.h>
  15#include <linux/kernel.h>
  16#include <linux/jhash.h>
  17#include <linux/jiffies.h>
  18#include <linux/llc.h>
  19#include <linux/module.h>
  20#include <linux/in.h>
  21#include <linux/rcupdate.h>
  22#include <linux/cpumask.h>
  23#include <linux/if_arp.h>
  24#include <linux/ip.h>
  25#include <linux/ipv6.h>
  26#include <linux/sctp.h>
  27#include <linux/tcp.h>
  28#include <linux/udp.h>
  29#include <linux/icmp.h>
  30#include <linux/icmpv6.h>
  31#include <linux/rculist.h>
  32#include <linux/sort.h>
  33#include <net/ip.h>
  34#include <net/ipv6.h>
  35#include <net/ndisc.h>
  36
  37#define TBL_MIN_BUCKETS         1024
  38#define MASK_ARRAY_SIZE_MIN     16
  39#define REHASH_INTERVAL         (10 * 60 * HZ)
  40
  41#define MC_DEFAULT_HASH_ENTRIES 256
  42#define MC_HASH_SHIFT           8
  43#define MC_HASH_SEGS            ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
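
/* Sizing note: by default (see ovs_flow_tbl_init()) each CPU gets a cache of
 * MC_DEFAULT_HASH_ENTRIES (256 == 1 << MC_HASH_SHIFT) entries, and a 32-bit
 * skb_hash is consumed MC_HASH_SHIFT bits at a time, giving
 * MC_HASH_SEGS == 32 / 8 == 4 probe positions per lookup (see
 * ovs_flow_tbl_lookup_stats() below).
 */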
  44
  45static struct kmem_cache *flow_cache;
  46struct kmem_cache *flow_stats_cache __read_mostly;
  47
  48static u16 range_n_bytes(const struct sw_flow_key_range *range)
  49{
  50        return range->end - range->start;
  51}
  52
  53void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
  54                       bool full, const struct sw_flow_mask *mask)
  55{
  56        int start = full ? 0 : mask->range.start;
  57        int len = full ? sizeof *dst : range_n_bytes(&mask->range);
  58        const long *m = (const long *)((const u8 *)&mask->key + start);
  59        const long *s = (const long *)((const u8 *)src + start);
  60        long *d = (long *)((u8 *)dst + start);
  61        int i;
  62
  63        /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
  64         * if 'full' is false the memory outside of the 'mask->range' is left
  65         * uninitialized. This can be used as an optimization when further
  66         * operations on 'dst' only use contents within 'mask->range'.
  67         */
  68        for (i = 0; i < len; i += sizeof(long))
  69                *d++ = *s++ & *m++;
  70}
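
/* Illustrative sketch (not part of the fast path itself): a lookup-style
 * caller only needs the bytes covered by 'mask->range', so it can pass
 * full == false and hash just that range, e.g.:
 *
 *	struct sw_flow_key masked_key;
 *	u32 hash;
 *
 *	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
 *	hash = flow_hash(&masked_key, &mask->range);
 *
 * which is what masked_flow_lookup() below does.
 */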
  71
  72struct sw_flow *ovs_flow_alloc(void)
  73{
  74        struct sw_flow *flow;
  75        struct sw_flow_stats *stats;
  76
  77        flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
  78        if (!flow)
  79                return ERR_PTR(-ENOMEM);
  80
  81        flow->stats_last_writer = -1;
  82
  83        /* Initialize the default stat node. */
  84        stats = kmem_cache_alloc_node(flow_stats_cache,
  85                                      GFP_KERNEL | __GFP_ZERO,
  86                                      node_online(0) ? 0 : NUMA_NO_NODE);
  87        if (!stats)
  88                goto err;
  89
  90        spin_lock_init(&stats->lock);
  91
  92        RCU_INIT_POINTER(flow->stats[0], stats);
  93
  94        cpumask_set_cpu(0, &flow->cpu_used_mask);
  95
  96        return flow;
  97err:
  98        kmem_cache_free(flow_cache, flow);
  99        return ERR_PTR(-ENOMEM);
 100}
 101
 102int ovs_flow_tbl_count(const struct flow_table *table)
 103{
 104        return table->count;
 105}
 106
 107static void flow_free(struct sw_flow *flow)
 108{
 109        int cpu;
 110
 111        if (ovs_identifier_is_key(&flow->id))
 112                kfree(flow->id.unmasked_key);
 113        if (flow->sf_acts)
 114                ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
 115        /* We open code this to make sure cpu 0 is always considered */
 116        for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
 117                if (flow->stats[cpu])
 118                        kmem_cache_free(flow_stats_cache,
 119                                        (struct sw_flow_stats __force *)flow->stats[cpu]);
 120        kmem_cache_free(flow_cache, flow);
 121}
 122
 123static void rcu_free_flow_callback(struct rcu_head *rcu)
 124{
 125        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
 126
 127        flow_free(flow);
 128}
 129
 130void ovs_flow_free(struct sw_flow *flow, bool deferred)
 131{
 132        if (!flow)
 133                return;
 134
 135        if (deferred)
 136                call_rcu(&flow->rcu, rcu_free_flow_callback);
 137        else
 138                flow_free(flow);
 139}
 140
 141static void __table_instance_destroy(struct table_instance *ti)
 142{
 143        kvfree(ti->buckets);
 144        kfree(ti);
 145}
 146
 147static struct table_instance *table_instance_alloc(int new_size)
 148{
 149        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
 150        int i;
 151
 152        if (!ti)
 153                return NULL;
 154
 155        ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
 156                                     GFP_KERNEL);
 157        if (!ti->buckets) {
 158                kfree(ti);
 159                return NULL;
 160        }
 161
 162        for (i = 0; i < new_size; i++)
 163                INIT_HLIST_HEAD(&ti->buckets[i]);
 164
 165        ti->n_buckets = new_size;
 166        ti->node_ver = 0;
 167        ti->keep_flows = false;
 168        get_random_bytes(&ti->hash_seed, sizeof(u32));
 169
 170        return ti;
 171}
 172
 173static void __mask_array_destroy(struct mask_array *ma)
 174{
 175        free_percpu(ma->masks_usage_cntr);
 176        kfree(ma);
 177}
 178
 179static void mask_array_rcu_cb(struct rcu_head *rcu)
 180{
 181        struct mask_array *ma = container_of(rcu, struct mask_array, rcu);
 182
 183        __mask_array_destroy(ma);
 184}
 185
 186static void tbl_mask_array_reset_counters(struct mask_array *ma)
 187{
 188        int i, cpu;
 189
 190        /* As the per-CPU counters are not atomic we cannot reset them from
 191         * another CPU. To still have an approximately zero-based counter we
 192         * store the value at reset time and subtract it later when
 193         * processing.
 194         */
 195        for (i = 0; i < ma->max; i++)  {
 196                ma->masks_usage_zero_cntr[i] = 0;
 197
 198                for_each_possible_cpu(cpu) {
 199                        u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
 200                                                          cpu);
 201                        unsigned int start;
 202                        u64 counter;
 203
 204                        do {
 205                                start = u64_stats_fetch_begin_irq(&ma->syncp);
 206                                counter = usage_counters[i];
 207                        } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
 208
 209                        ma->masks_usage_zero_cntr[i] += counter;
 210                }
 211        }
 212}
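
/* Example of the zero-baseline scheme above: if a mask's summed per-CPU
 * counter reads 150 and its masks_usage_zero_cntr snapshot is 120, the usage
 * since the last reset is 30. ovs_flow_masks_rebalance() performs this
 * subtraction when it sorts the masks.
 */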
 213
 214static struct mask_array *tbl_mask_array_alloc(int size)
 215{
 216        struct mask_array *new;
 217
 218        size = max(MASK_ARRAY_SIZE_MIN, size);
 219        new = kzalloc(sizeof(struct mask_array) +
 220                      sizeof(struct sw_flow_mask *) * size +
 221                      sizeof(u64) * size, GFP_KERNEL);
 222        if (!new)
 223                return NULL;
 224
 225        new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
 226                                             sizeof(struct mask_array) +
 227                                             sizeof(struct sw_flow_mask *) *
 228                                             size);
 229
 230        new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
 231                                               __alignof__(u64));
 232        if (!new->masks_usage_cntr) {
 233                kfree(new);
 234                return NULL;
 235        }
 236
 237        new->count = 0;
 238        new->max = size;
 239
 240        return new;
 241}
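
/* Memory layout produced by tbl_mask_array_alloc(): a single allocation holds
 * the struct mask_array header, then 'size' mask pointers (the masks[] array),
 * then 'size' u64 zero-baseline counters pointed to by masks_usage_zero_cntr.
 * The live usage counters themselves are a separate per-CPU allocation
 * (masks_usage_cntr).
 */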
 242
 243static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
 244{
 245        struct mask_array *old;
 246        struct mask_array *new;
 247
 248        new = tbl_mask_array_alloc(size);
 249        if (!new)
 250                return -ENOMEM;
 251
 252        old = ovsl_dereference(tbl->mask_array);
 253        if (old) {
 254                int i;
 255
 256                for (i = 0; i < old->max; i++) {
 257                        if (ovsl_dereference(old->masks[i]))
 258                                new->masks[new->count++] = old->masks[i];
 259                }
 260                call_rcu(&old->rcu, mask_array_rcu_cb);
 261        }
 262
 263        rcu_assign_pointer(tbl->mask_array, new);
 264
 265        return 0;
 266}
 267
 268static int tbl_mask_array_add_mask(struct flow_table *tbl,
 269                                   struct sw_flow_mask *new)
 270{
 271        struct mask_array *ma = ovsl_dereference(tbl->mask_array);
 272        int err, ma_count = READ_ONCE(ma->count);
 273
 274        if (ma_count >= ma->max) {
 275                err = tbl_mask_array_realloc(tbl, ma->max +
 276                                              MASK_ARRAY_SIZE_MIN);
 277                if (err)
 278                        return err;
 279
 280                ma = ovsl_dereference(tbl->mask_array);
 281        } else {
 282                /* On every add or delete we need to reset the counters so
 283                 * every new mask gets a fair chance of being prioritized.
 284                 */
 285                tbl_mask_array_reset_counters(ma);
 286        }
 287
 288        BUG_ON(ovsl_dereference(ma->masks[ma_count]));
 289
 290        rcu_assign_pointer(ma->masks[ma_count], new);
 291        WRITE_ONCE(ma->count, ma_count + 1);
 292
 293        return 0;
 294}
 295
 296static void tbl_mask_array_del_mask(struct flow_table *tbl,
 297                                    struct sw_flow_mask *mask)
 298{
 299        struct mask_array *ma = ovsl_dereference(tbl->mask_array);
 300        int i, ma_count = READ_ONCE(ma->count);
 301
 302        /* Remove the deleted mask pointers from the array */
 303        for (i = 0; i < ma_count; i++) {
 304                if (mask == ovsl_dereference(ma->masks[i]))
 305                        goto found;
 306        }
 307
 308        BUG();
 309        return;
 310
 311found:
 312        WRITE_ONCE(ma->count, ma_count - 1);
 313
 314        rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
 315        RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
 316
 317        kfree_rcu(mask, rcu);
 318
 319        /* Shrink the mask array if necessary. */
 320        if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
 321            ma_count <= (ma->max / 3))
 322                tbl_mask_array_realloc(tbl, ma->max / 2);
 323        else
 324                tbl_mask_array_reset_counters(ma);
 325
 326}
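
/* Deletion keeps masks[0..count-1] densely packed by moving the last mask
 * into the freed slot, which is why flow_lookup() can stop scanning at the
 * first NULL entry. The array is only shrunk once it is at most a third full
 * (and at least twice the minimum size), so a shrink leaves enough headroom
 * that it does not immediately trigger a regrow.
 */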
 327
 328/* Remove 'mask' from the mask list, if it is not needed any more. */
 329static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
 330{
 331        if (mask) {
 332                /* ovs-lock is required to protect mask-refcount and
 333                 * mask list.
 334                 */
 335                ASSERT_OVSL();
 336                BUG_ON(!mask->ref_count);
 337                mask->ref_count--;
 338
 339                if (!mask->ref_count)
 340                        tbl_mask_array_del_mask(tbl, mask);
 341        }
 342}
 343
 344static void __mask_cache_destroy(struct mask_cache *mc)
 345{
 346        free_percpu(mc->mask_cache);
 347        kfree(mc);
 348}
 349
 350static void mask_cache_rcu_cb(struct rcu_head *rcu)
 351{
 352        struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);
 353
 354        __mask_cache_destroy(mc);
 355}
 356
 357static struct mask_cache *tbl_mask_cache_alloc(u32 size)
 358{
 359        struct mask_cache_entry __percpu *cache = NULL;
 360        struct mask_cache *new;
 361
 362        /* Only allow the size to be 0 or a power of 2, and it must not
 363         * exceed the per-CPU allocation size.
 364         */
 365        if ((!is_power_of_2(size) && size != 0) ||
 366            (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
 367                return NULL;
 368
 369        new = kzalloc(sizeof(*new), GFP_KERNEL);
 370        if (!new)
 371                return NULL;
 372
 373        new->cache_size = size;
 374        if (new->cache_size > 0) {
 375                cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
 376                                                  new->cache_size),
 377                                       __alignof__(struct mask_cache_entry));
 378                if (!cache) {
 379                        kfree(new);
 380                        return NULL;
 381                }
 382        }
 383
 384        new->mask_cache = cache;
 385        return new;
 386}

 387int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
 388{
 389        struct mask_cache *mc = rcu_dereference(table->mask_cache);
 390        struct mask_cache *new;
 391
 392        if (size == mc->cache_size)
 393                return 0;
 394
 395        if ((!is_power_of_2(size) && size != 0) ||
 396            (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
 397                return -EINVAL;
 398
 399        new = tbl_mask_cache_alloc(size);
 400        if (!new)
 401                return -ENOMEM;
 402
 403        rcu_assign_pointer(table->mask_cache, new);
 404        call_rcu(&mc->rcu, mask_cache_rcu_cb);
 405
 406        return 0;
 407}
 408
 409int ovs_flow_tbl_init(struct flow_table *table)
 410{
 411        struct table_instance *ti, *ufid_ti;
 412        struct mask_cache *mc;
 413        struct mask_array *ma;
 414
 415        mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
 416        if (!mc)
 417                return -ENOMEM;
 418
 419        ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
 420        if (!ma)
 421                goto free_mask_cache;
 422
 423        ti = table_instance_alloc(TBL_MIN_BUCKETS);
 424        if (!ti)
 425                goto free_mask_array;
 426
 427        ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
 428        if (!ufid_ti)
 429                goto free_ti;
 430
 431        rcu_assign_pointer(table->ti, ti);
 432        rcu_assign_pointer(table->ufid_ti, ufid_ti);
 433        rcu_assign_pointer(table->mask_array, ma);
 434        rcu_assign_pointer(table->mask_cache, mc);
 435        table->last_rehash = jiffies;
 436        table->count = 0;
 437        table->ufid_count = 0;
 438        return 0;
 439
 440free_ti:
 441        __table_instance_destroy(ti);
 442free_mask_array:
 443        __mask_array_destroy(ma);
 444free_mask_cache:
 445        __mask_cache_destroy(mc);
 446        return -ENOMEM;
 447}
 448
 449static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
 450{
 451        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
 452
 453        __table_instance_destroy(ti);
 454}
 455
 456static void table_instance_flow_free(struct flow_table *table,
 457                                  struct table_instance *ti,
 458                                  struct table_instance *ufid_ti,
 459                                  struct sw_flow *flow,
 460                                  bool count)
 461{
 462        hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
 463        if (count)
 464                table->count--;
 465
 466        if (ovs_identifier_is_ufid(&flow->id)) {
 467                hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
 468
 469                if (count)
 470                        table->ufid_count--;
 471        }
 472
 473        flow_mask_remove(table, flow->mask);
 474}
 475
 476/* Must be called with OVS mutex held. */
 477void table_instance_flow_flush(struct flow_table *table,
 478                               struct table_instance *ti,
 479                               struct table_instance *ufid_ti)
 480{
 481        int i;
 482
 483        if (ti->keep_flows)
 484                return;
 485
 486        for (i = 0; i < ti->n_buckets; i++) {
 487                struct sw_flow *flow;
 488                struct hlist_head *head = &ti->buckets[i];
 489                struct hlist_node *n;
 490
 491                hlist_for_each_entry_safe(flow, n, head,
 492                                          flow_table.node[ti->node_ver]) {
 493
 494                        table_instance_flow_free(table, ti, ufid_ti,
 495                                                 flow, false);
 496                        ovs_flow_free(flow, true);
 497                }
 498        }
 499}
 500
 501static void table_instance_destroy(struct table_instance *ti,
 502                                   struct table_instance *ufid_ti)
 503{
 504        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
 505        call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
 506}
 507
 508/* No need for locking; this function is called from an RCU callback or
 509 * the error path.
 510 */
 511void ovs_flow_tbl_destroy(struct flow_table *table)
 512{
 513        struct table_instance *ti = rcu_dereference_raw(table->ti);
 514        struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
 515        struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
 516        struct mask_array *ma = rcu_dereference_raw(table->mask_array);
 517
 518        call_rcu(&mc->rcu, mask_cache_rcu_cb);
 519        call_rcu(&ma->rcu, mask_array_rcu_cb);
 520        table_instance_destroy(ti, ufid_ti);
 521}
 522
 523struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
 524                                       u32 *bucket, u32 *last)
 525{
 526        struct sw_flow *flow;
 527        struct hlist_head *head;
 528        int ver;
 529        int i;
 530
 531        ver = ti->node_ver;
 532        while (*bucket < ti->n_buckets) {
 533                i = 0;
 534                head = &ti->buckets[*bucket];
 535                hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
 536                        if (i < *last) {
 537                                i++;
 538                                continue;
 539                        }
 540                        *last = i + 1;
 541                        return flow;
 542                }
 543                (*bucket)++;
 544                *last = 0;
 545        }
 546
 547        return NULL;
 548}
 549
 550static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
 551{
 552        hash = jhash_1word(hash, ti->hash_seed);
 553        return &ti->buckets[hash & (ti->n_buckets - 1)];
 554}
 555
 556static void table_instance_insert(struct table_instance *ti,
 557                                  struct sw_flow *flow)
 558{
 559        struct hlist_head *head;
 560
 561        head = find_bucket(ti, flow->flow_table.hash);
 562        hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
 563}
 564
 565static void ufid_table_instance_insert(struct table_instance *ti,
 566                                       struct sw_flow *flow)
 567{
 568        struct hlist_head *head;
 569
 570        head = find_bucket(ti, flow->ufid_table.hash);
 571        hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
 572}
 573
 574static void flow_table_copy_flows(struct table_instance *old,
 575                                  struct table_instance *new, bool ufid)
 576{
 577        int old_ver;
 578        int i;
 579
 580        old_ver = old->node_ver;
 581        new->node_ver = !old_ver;
 582
 583        /* Insert in new table. */
 584        for (i = 0; i < old->n_buckets; i++) {
 585                struct sw_flow *flow;
 586                struct hlist_head *head = &old->buckets[i];
 587
 588                if (ufid)
 589                        hlist_for_each_entry_rcu(flow, head,
 590                                                 ufid_table.node[old_ver],
 591                                                 lockdep_ovsl_is_held())
 592                                ufid_table_instance_insert(new, flow);
 593                else
 594                        hlist_for_each_entry_rcu(flow, head,
 595                                                 flow_table.node[old_ver],
 596                                                 lockdep_ovsl_is_held())
 597                                table_instance_insert(new, flow);
 598        }
 599
 600        old->keep_flows = true;
 601}
 602
 603static struct table_instance *table_instance_rehash(struct table_instance *ti,
 604                                                    int n_buckets, bool ufid)
 605{
 606        struct table_instance *new_ti;
 607
 608        new_ti = table_instance_alloc(n_buckets);
 609        if (!new_ti)
 610                return NULL;
 611
 612        flow_table_copy_flows(ti, new_ti, ufid);
 613
 614        return new_ti;
 615}
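
/* Rehashing never moves the flows' hlist nodes out from under RCU readers:
 * each sw_flow carries two flow_table nodes and two ufid_table nodes,
 * indexed by node_ver. flow_table_copy_flows() links the flows into the new
 * instance under !old_ver while readers keep walking the old instance under
 * old_ver, and keep_flows ensures the old instance is freed without freeing
 * the flows themselves.
 */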
 616
 617int ovs_flow_tbl_flush(struct flow_table *flow_table)
 618{
 619        struct table_instance *old_ti, *new_ti;
 620        struct table_instance *old_ufid_ti, *new_ufid_ti;
 621
 622        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
 623        if (!new_ti)
 624                return -ENOMEM;
 625        new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
 626        if (!new_ufid_ti)
 627                goto err_free_ti;
 628
 629        old_ti = ovsl_dereference(flow_table->ti);
 630        old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
 631
 632        rcu_assign_pointer(flow_table->ti, new_ti);
 633        rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
 634        flow_table->last_rehash = jiffies;
 635        flow_table->count = 0;
 636        flow_table->ufid_count = 0;
 637
 638        table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
 639        table_instance_destroy(old_ti, old_ufid_ti);
 640        return 0;
 641
 642err_free_ti:
 643        __table_instance_destroy(new_ti);
 644        return -ENOMEM;
 645}
 646
 647static u32 flow_hash(const struct sw_flow_key *key,
 648                     const struct sw_flow_key_range *range)
 649{
 650        const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);
 651
 652        /* Make sure the number of hash bytes is a multiple of sizeof(u32). */
 653        int hash_u32s = range_n_bytes(range) >> 2;
 654
 655        return jhash2(hash_key, hash_u32s, 0);
 656}
 657
 658static int flow_key_start(const struct sw_flow_key *key)
 659{
 660        if (key->tun_proto)
 661                return 0;
 662        else
 663                return rounddown(offsetof(struct sw_flow_key, phy),
 664                                          sizeof(long));
 665}
 666
 667static bool cmp_key(const struct sw_flow_key *key1,
 668                    const struct sw_flow_key *key2,
 669                    int key_start, int key_end)
 670{
 671        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
 672        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
 673        long diffs = 0;
 674        int i;
 675
 676        for (i = key_start; i < key_end;  i += sizeof(long))
 677                diffs |= *cp1++ ^ *cp2++;
 678
 679        return diffs == 0;
 680}
 681
 682static bool flow_cmp_masked_key(const struct sw_flow *flow,
 683                                const struct sw_flow_key *key,
 684                                const struct sw_flow_key_range *range)
 685{
 686        return cmp_key(&flow->key, key, range->start, range->end);
 687}
 688
 689static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
 690                                      const struct sw_flow_match *match)
 691{
 692        struct sw_flow_key *key = match->key;
 693        int key_start = flow_key_start(key);
 694        int key_end = match->range.end;
 695
 696        BUG_ON(ovs_identifier_is_ufid(&flow->id));
 697        return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
 698}
 699
 700static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 701                                          const struct sw_flow_key *unmasked,
 702                                          const struct sw_flow_mask *mask,
 703                                          u32 *n_mask_hit)
 704{
 705        struct sw_flow *flow;
 706        struct hlist_head *head;
 707        u32 hash;
 708        struct sw_flow_key masked_key;
 709
 710        ovs_flow_mask_key(&masked_key, unmasked, false, mask);
 711        hash = flow_hash(&masked_key, &mask->range);
 712        head = find_bucket(ti, hash);
 713        (*n_mask_hit)++;
 714
 715        hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
 716                                lockdep_ovsl_is_held()) {
 717                if (flow->mask == mask && flow->flow_table.hash == hash &&
 718                    flow_cmp_masked_key(flow, &masked_key, &mask->range))
 719                        return flow;
 720        }
 721        return NULL;
 722}
 723
 724/* Flow lookup does a full lookup on the flow table. It starts with the
 725 * mask at the index passed in via '*index'.
 726 */
 727static struct sw_flow *flow_lookup(struct flow_table *tbl,
 728                                   struct table_instance *ti,
 729                                   struct mask_array *ma,
 730                                   const struct sw_flow_key *key,
 731                                   u32 *n_mask_hit,
 732                                   u32 *n_cache_hit,
 733                                   u32 *index)
 734{
 735        u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
 736        struct sw_flow *flow;
 737        struct sw_flow_mask *mask;
 738        int i;
 739
 740        if (likely(*index < ma->max)) {
 741                mask = rcu_dereference_ovsl(ma->masks[*index]);
 742                if (mask) {
 743                        flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
 744                        if (flow) {
 745                                u64_stats_update_begin(&ma->syncp);
 746                                usage_counters[*index]++;
 747                                u64_stats_update_end(&ma->syncp);
 748                                (*n_cache_hit)++;
 749                                return flow;
 750                        }
 751                }
 752        }
 753
 754        for (i = 0; i < ma->max; i++)  {
 755
 756                if (i == *index)
 757                        continue;
 758
 759                mask = rcu_dereference_ovsl(ma->masks[i]);
 760                if (unlikely(!mask))
 761                        break;
 762
 763                flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
 764                if (flow) { /* Found */
 765                        *index = i;
 766                        u64_stats_update_begin(&ma->syncp);
 767                        usage_counters[*index]++;
 768                        u64_stats_update_end(&ma->syncp);
 769                        return flow;
 770                }
 771        }
 772
 773        return NULL;
 774}
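
/* Note that '*index' is both an input hint and an output: when the hint
 * misses and the linear scan finds the flow under a different mask, '*index'
 * is updated, so a caching caller (ovs_flow_tbl_lookup_stats()) ends up
 * storing the mask that actually matched.
 */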
 775
 776/*
 777 * mask_cache maps a flow to a probable mask. This cache is not tightly
 778 * coupled; updates to the mask list can result in stale entries in the
 779 * mask cache.
 780 * The cache is per CPU and is divided into MC_HASH_SEGS segments.
 781 * In case of a hash collision the entry is hashed into the next segment.
 782 */
 783struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 784                                          const struct sw_flow_key *key,
 785                                          u32 skb_hash,
 786                                          u32 *n_mask_hit,
 787                                          u32 *n_cache_hit)
 788{
 789        struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
 790        struct mask_array *ma = rcu_dereference(tbl->mask_array);
 791        struct table_instance *ti = rcu_dereference(tbl->ti);
 792        struct mask_cache_entry *entries, *ce;
 793        struct sw_flow *flow;
 794        u32 hash;
 795        int seg;
 796
 797        *n_mask_hit = 0;
 798        *n_cache_hit = 0;
 799        if (unlikely(!skb_hash || mc->cache_size == 0)) {
 800                u32 mask_index = 0;
 801                u32 cache = 0;
 802
 803                return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
 804                                   &mask_index);
 805        }
 806
 807        /* Pre- and post-recirculation flows usually have the same skb_hash
 808         * value. To avoid hash collisions, rehash the 'skb_hash' with
 809         * 'recirc_id'.  */
 810        if (key->recirc_id)
 811                skb_hash = jhash_1word(skb_hash, key->recirc_id);
 812
 813        ce = NULL;
 814        hash = skb_hash;
 815        entries = this_cpu_ptr(mc->mask_cache);
 816
 817        /* Find the cache entry 'ce' to operate on. */
 818        for (seg = 0; seg < MC_HASH_SEGS; seg++) {
 819                int index = hash & (mc->cache_size - 1);
 820                struct mask_cache_entry *e;
 821
 822                e = &entries[index];
 823                if (e->skb_hash == skb_hash) {
 824                        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
 825                                           n_cache_hit, &e->mask_index);
 826                        if (!flow)
 827                                e->skb_hash = 0;
 828                        return flow;
 829                }
 830
 831                if (!ce || e->skb_hash < ce->skb_hash)
 832                        ce = e;  /* A better replacement cache candidate. */
 833
 834                hash >>= MC_HASH_SHIFT;
 835        }
 836
 837        /* Cache miss, do full lookup. */
 838        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
 839                           &ce->mask_index);
 840        if (flow)
 841                ce->skb_hash = skb_hash;
 842
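        /* The skb_hash did not match any cache entry, so this lookup does not
         * count as a cache hit even if the replacement candidate's mask hint
         * happened to match.
         */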
 843        *n_cache_hit = 0;
 844        return flow;
 845}
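
/* Worked example of the segmented probing above, assuming the default
 * cache_size of 256: for skb_hash 0xa1b2c3d4 the four candidate slots are
 * 0xd4, 0xc3, 0xb2 and 0xa1, i.e.
 * (skb_hash >> (seg * MC_HASH_SHIFT)) & (cache_size - 1)
 * for seg = 0..MC_HASH_SEGS-1.
 */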
 846
 847struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 848                                    const struct sw_flow_key *key)
 849{
 850        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 851        struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
 852        u32 __always_unused n_mask_hit;
 853        u32 __always_unused n_cache_hit;
 854        u32 index = 0;
 855
 856        return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
 857}
 858
 859struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
 860                                          const struct sw_flow_match *match)
 861{
 862        struct mask_array *ma = ovsl_dereference(tbl->mask_array);
 863        int i;
 864
 865        /* Always called under ovs-mutex. */
 866        for (i = 0; i < ma->max; i++) {
 867                struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 868                u32 __always_unused n_mask_hit;
 869                struct sw_flow_mask *mask;
 870                struct sw_flow *flow;
 871
 872                mask = ovsl_dereference(ma->masks[i]);
 873                if (!mask)
 874                        continue;
 875
 876                flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
 877                if (flow && ovs_identifier_is_key(&flow->id) &&
 878                    ovs_flow_cmp_unmasked_key(flow, match)) {
 879                        return flow;
 880                }
 881        }
 882
 883        return NULL;
 884}
 885
 886static u32 ufid_hash(const struct sw_flow_id *sfid)
 887{
 888        return jhash(sfid->ufid, sfid->ufid_len, 0);
 889}
 890
 891static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
 892                              const struct sw_flow_id *sfid)
 893{
 894        if (flow->id.ufid_len != sfid->ufid_len)
 895                return false;
 896
 897        return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
 898}
 899
 900bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
 901{
 902        if (ovs_identifier_is_ufid(&flow->id))
 903                return flow_cmp_masked_key(flow, match->key, &match->range);
 904
 905        return ovs_flow_cmp_unmasked_key(flow, match);
 906}
 907
 908struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
 909                                         const struct sw_flow_id *ufid)
 910{
 911        struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
 912        struct sw_flow *flow;
 913        struct hlist_head *head;
 914        u32 hash;
 915
 916        hash = ufid_hash(ufid);
 917        head = find_bucket(ti, hash);
 918        hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
 919                                lockdep_ovsl_is_held()) {
 920                if (flow->ufid_table.hash == hash &&
 921                    ovs_flow_cmp_ufid(flow, ufid))
 922                        return flow;
 923        }
 924        return NULL;
 925}
 926
 927int ovs_flow_tbl_num_masks(const struct flow_table *table)
 928{
 929        struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
 930        return READ_ONCE(ma->count);
 931}
 932
 933u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
 934{
 935        struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
 936
 937        return READ_ONCE(mc->cache_size);
 938}
 939
 940static struct table_instance *table_instance_expand(struct table_instance *ti,
 941                                                    bool ufid)
 942{
 943        return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
 944}
 945
 946/* Must be called with OVS mutex held. */
 947void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 948{
 949        struct table_instance *ti = ovsl_dereference(table->ti);
 950        struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
 951
 952        BUG_ON(table->count == 0);
 953        table_instance_flow_free(table, ti, ufid_ti, flow, true);
 954}
 955
 956static struct sw_flow_mask *mask_alloc(void)
 957{
 958        struct sw_flow_mask *mask;
 959
 960        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
 961        if (mask)
 962                mask->ref_count = 1;
 963
 964        return mask;
 965}
 966
 967static bool mask_equal(const struct sw_flow_mask *a,
 968                       const struct sw_flow_mask *b)
 969{
 970        const u8 *a_ = (const u8 *)&a->key + a->range.start;
 971        const u8 *b_ = (const u8 *)&b->key + b->range.start;
 972
 973        return  (a->range.end == b->range.end)
 974                && (a->range.start == b->range.start)
 975                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
 976}
 977
 978static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
 979                                           const struct sw_flow_mask *mask)
 980{
 981        struct mask_array *ma;
 982        int i;
 983
 984        ma = ovsl_dereference(tbl->mask_array);
 985        for (i = 0; i < ma->max; i++) {
 986                struct sw_flow_mask *t;
 987                t = ovsl_dereference(ma->masks[i]);
 988
 989                if (t && mask_equal(mask, t))
 990                        return t;
 991        }
 992
 993        return NULL;
 994}
 995
 996/* Add 'mask' into the mask list, if it is not already there. */
 997static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
 998                            const struct sw_flow_mask *new)
 999{
1000        struct sw_flow_mask *mask;
1001
1002        mask = flow_mask_find(tbl, new);
1003        if (!mask) {
1004                /* Allocate a new mask if none exists. */
1005                mask = mask_alloc();
1006                if (!mask)
1007                        return -ENOMEM;
1008                mask->key = new->key;
1009                mask->range = new->range;
1010
1011                /* Add mask to mask-list. */
1012                if (tbl_mask_array_add_mask(tbl, mask)) {
1013                        kfree(mask);
1014                        return -ENOMEM;
1015                }
1016        } else {
1017                BUG_ON(!mask->ref_count);
1018                mask->ref_count++;
1019        }
1020
1021        flow->mask = mask;
1022        return 0;
1023}
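
/* Masks are deduplicated: flows that share the same match structure share one
 * sw_flow_mask via ref_count, so the mask array (and therefore the per-packet
 * lookup cost) scales with the number of distinct masks rather than the
 * number of flows.
 */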
1024
1025/* Must be called with OVS mutex held. */
1026static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
1027{
1028        struct table_instance *new_ti = NULL;
1029        struct table_instance *ti;
1030
1031        flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
1032        ti = ovsl_dereference(table->ti);
1033        table_instance_insert(ti, flow);
1034        table->count++;
1035
1036        /* Expand table, if necessary, to make room. */
1037        if (table->count > ti->n_buckets)
1038                new_ti = table_instance_expand(ti, false);
1039        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
1040                new_ti = table_instance_rehash(ti, ti->n_buckets, false);
1041
1042        if (new_ti) {
1043                rcu_assign_pointer(table->ti, new_ti);
1044                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
1045                table->last_rehash = jiffies;
1046        }
1047}
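
/* Growth/rehash policy note: flow_key_insert() doubles the bucket count as
 * soon as there are more flows than buckets, and otherwise rebuilds the table
 * with a fresh hash_seed every REHASH_INTERVAL (10 * 60 * HZ, i.e. about
 * every 10 minutes), which helps keep bucket chains short if the current seed
 * hashes poorly.
 */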
1048
1049/* Must be called with OVS mutex held. */
1050static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
1051{
1052        struct table_instance *ti;
1053
1054        flow->ufid_table.hash = ufid_hash(&flow->id);
1055        ti = ovsl_dereference(table->ufid_ti);
1056        ufid_table_instance_insert(ti, flow);
1057        table->ufid_count++;
1058
1059        /* Expand table, if necessary, to make room. */
1060        if (table->ufid_count > ti->n_buckets) {
1061                struct table_instance *new_ti;
1062
1063                new_ti = table_instance_expand(ti, true);
1064                if (new_ti) {
1065                        rcu_assign_pointer(table->ufid_ti, new_ti);
1066                        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
1067                }
1068        }
1069}
1070
1071/* Must be called with OVS mutex held. */
1072int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
1073                        const struct sw_flow_mask *mask)
1074{
1075        int err;
1076
1077        err = flow_mask_insert(table, flow, mask);
1078        if (err)
1079                return err;
1080        flow_key_insert(table, flow);
1081        if (ovs_identifier_is_ufid(&flow->id))
1082                flow_ufid_insert(table, flow);
1083
1084        return 0;
1085}
1086
1087static int compare_mask_and_count(const void *a, const void *b)
1088{
1089        const struct mask_count *mc_a = a;
1090        const struct mask_count *mc_b = b;
1091
1092        return (s64)mc_b->counter - (s64)mc_a->counter;
1093}
1094
1095/* Must be called with OVS mutex held. */
1096void ovs_flow_masks_rebalance(struct flow_table *table)
1097{
1098        struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
1099        struct mask_count *masks_and_count;
1100        struct mask_array *new;
1101        int masks_entries = 0;
1102        int i;
1103
1104        /* Build array of all current entries with use counters. */
1105        masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
1106                                        GFP_KERNEL);
1107        if (!masks_and_count)
1108                return;
1109
1110        for (i = 0; i < ma->max; i++)  {
1111                struct sw_flow_mask *mask;
1112                unsigned int start;
1113                int cpu;
1114
1115                mask = rcu_dereference_ovsl(ma->masks[i]);
1116                if (unlikely(!mask))
1117                        break;
1118
1119                masks_and_count[i].index = i;
1120                masks_and_count[i].counter = 0;
1121
1122                for_each_possible_cpu(cpu) {
1123                        u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
1124                                                          cpu);
1125                        u64 counter;
1126
1127                        do {
1128                                start = u64_stats_fetch_begin_irq(&ma->syncp);
1129                                counter = usage_counters[i];
1130                        } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
1131
1132                        masks_and_count[i].counter += counter;
1133                }
1134
1135                /* Subtract the zero count value. */
1136                masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];
1137
1138                /* Rather than calling tbl_mask_array_reset_counters()
1139                 * below when no change is needed, do it inline here.
1140                 */
1141                ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
1142        }
1143
1144        if (i == 0)
1145                goto free_mask_entries;
1146
1147        /* Sort the entries */
1148        masks_entries = i;
1149        sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
1150             compare_mask_and_count, NULL);
1151
1152        /* If the order is the same, nothing to do... */
1153        for (i = 0; i < masks_entries; i++) {
1154                if (i != masks_and_count[i].index)
1155                        break;
1156        }
1157        if (i == masks_entries)
1158                goto free_mask_entries;
1159
1160        /* Rebuild the new list in order of usage. */
1161        new = tbl_mask_array_alloc(ma->max);
1162        if (!new)
1163                goto free_mask_entries;
1164
1165        for (i = 0; i < masks_entries; i++) {
1166                int index = masks_and_count[i].index;
1167
1168                if (ovsl_dereference(ma->masks[index]))
1169                        new->masks[new->count++] = ma->masks[index];
1170        }
1171
1172        rcu_assign_pointer(table->mask_array, new);
1173        call_rcu(&ma->rcu, mask_array_rcu_cb);
1174
1175free_mask_entries:
1176        kfree(masks_and_count);
1177}
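
/* The rebalance above only swaps in a new mask_array when the usage-sorted
 * order actually differs from the current one; frequently hit masks then sit
 * at the front of masks[], which shortens the linear scan in flow_lookup()
 * for cache misses.
 */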
1178
1179/* Initializes the flow module.
1180 * Returns zero if successful or a negative error code. */
1181int ovs_flow_init(void)
1182{
1183        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
1184        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
1185
1186        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
1187                                       + (nr_cpu_ids
1188                                          * sizeof(struct sw_flow_stats *)),
1189                                       0, 0, NULL);
1190        if (flow_cache == NULL)
1191                return -ENOMEM;
1192
1193        flow_stats_cache
1194                = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
1195                                    0, SLAB_HWCACHE_ALIGN, NULL);
1196        if (flow_stats_cache == NULL) {
1197                kmem_cache_destroy(flow_cache);
1198                flow_cache = NULL;
1199                return -ENOMEM;
1200        }
1201
1202        return 0;
1203}
1204
1205/* Uninitializes the flow module. */
1206void ovs_flow_exit(void)
1207{
1208        kmem_cache_destroy(flow_stats_cache);
1209        kmem_cache_destroy(flow_cache);
1210}
1211