/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

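/* The per-CPU mask cache maps an skb hash to a likely index in the mask
 * array.  MC_HASH_SHIFT bits of the hash select a slot, and on collision
 * up to MC_HASH_SEGS further hash segments are probed.
 */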
#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

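/* Copy 'src' into 'dst', ANDing each long-sized word with 'mask'.  When
 * 'full' is false, only the bytes covered by 'mask->range' are written.
 */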
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* The memory outside of the 'mask->range' is not set since
	 * further operations on 'dst' only use contents within
	 * 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

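/* Free the flow's unmasked key, its actions, and every per-CPU stats node
 * that was allocated; CPU 0 always has a stats node, other CPUs only if
 * set in 'cpu_used_mask'.
 */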
static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
					  flow->sf_acts);

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	}

	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_stats);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per CPU counters are not atomic we can not go ahead and
	 * reset them from another CPU. To be able to still have an
	 * approximate zero based counter we store the value at reset, and
	 * subtract it later when processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				start = u64_stats_fetch_begin_irq(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry_irq(&stats->syncp,
							   start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}

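/* A mask_array is one allocation holding the struct itself, the trailing
 * 'masks' pointer array, and the 'masks_usage_zero_cntr' baseline used to
 * zero the per-CPU usage counters without touching them remotely.
 */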
static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     sizeof(struct mask_array) +
					     sizeof(struct sw_flow_mask *) *
					     size);

	new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
						sizeof(u64) * size,
						__alignof__(u64));
	if (!new->masks_usage_stats) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}

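/* Swap in a mask array of 'size' entries, compacting live masks to the
 * front; the old array is freed after an RCU grace period.
 */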
static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					     MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

static void __mask_cache_destroy(struct mask_cache *mc)
{
	free_percpu(mc->mask_cache);
	kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

	__mask_cache_destroy(mc);
}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
	struct mask_cache_entry __percpu *cache = NULL;
	struct mask_cache *new;

	/* Only allow the size to be 0 or a power of 2, and not to exceed
	 * the per-CPU allocation size.
	 */
	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->cache_size = size;
	if (new->cache_size > 0) {
		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
						  new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
			kfree(new);
			return NULL;
		}
	}

	new->mask_cache = cache;
	return new;
}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
	struct mask_cache *new;

	if (size == mc->cache_size)
		return 0;

	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return -EINVAL;

	new = tbl_mask_cache_alloc(size);
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(table->mask_cache, new);
	call_rcu(&mc->rcu, mask_cache_rcu_cb);

	return 0;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_cache *mc;
	struct mask_array *ma;

	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
	if (!mc)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	rcu_assign_pointer(table->mask_cache, mc);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	__mask_cache_destroy(mc);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti;

	ti = container_of(rcu, struct table_instance, rcu);
	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti)
{
	int i;

	for (i = 0; i < ti->n_buckets; i++) {
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		struct sw_flow *flow;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti,
						 flow);
			ovs_flow_free(flow, true);
		}
	}

	if (WARN_ON(table->count != 0 ||
		    table->ufid_count != 0)) {
		table->count = 0;
		table->ufid_count = 0;
	}
}

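/* Free both table instances after an RCU grace period, so concurrent
 * readers can finish walking their buckets.
 */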
static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti)
{
	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}

/* No need for locking: this function is called from
 * RCU callback or from user space.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
	struct mask_array *ma = rcu_dereference_raw(table->mask_array);

	call_rcu(&mc->rcu, mask_cache_rcu_cb);
	call_rcu(&ma->rcu, mask_array_rcu_cb);
	table_instance_destroy(ti, ufid_ti);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

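/* 'n_buckets' is always a power of two, so the seeded jhash folds into a
 * bucket index with a simple mask.
 */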
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;

	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
	table_instance_destroy(old_ti, old_ufid_ti);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* Make sure the number of hash bytes is a multiple of u32. */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

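/* Look up a flow under a single mask: mask the key, hash only the bytes in
 * 'mask->range', then walk the bucket comparing mask pointer, hash, and
 * masked key.
 */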
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* Flow lookup does full lookup on flow table. It starts with
 * mask from index passed in *index.
 * This function MUST be called with BH disabled due to the use
 * of a per-cpu seqcount.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&stats->syncp);
				stats->usage_cntrs[*index]++;
				u64_stats_update_end(&stats->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {
		/* The cached mask at *index was tried above; skip it here. */
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&stats->syncp);
			stats->usage_cntrs[*index]++;
			u64_stats_update_end(&stats->syncp);
			return flow;
		}
	}

	return NULL;
}

/* The mask cache maps a flow to a probable mask. The cache is not tightly
 * coupled: updates to the mask list can leave stale entries behind.
 * It is a per-CPU cache divided into MC_HASH_SEGS segments; in case of a
 * hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash || mc->cache_size == 0)) {
		u32 mask_index = 0;
		u32 cache = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre- and post-recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(mc->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (mc->cache_size - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;  /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	/* Cache miss: don't report it as a cache hit. */
	*n_cache_hit = 0;
	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	struct sw_flow *flow;
	u32 index = 0;

	/* This function gets called through the netlink interface and
	 * therefore is preemptible. However, flow_lookup() needs to be
	 * called with BH disabled due to CPU-specific variables.
	 */
	local_bh_disable();
	flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
	local_bh_enable();
	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match)) {
			return flow;
		}
	}

	return NULL;
}

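/* Flows installed with a unique flow identifier (UFID) are also indexed in
 * the 'ufid_ti' table instance, hashed by the identifier itself.
 */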
static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow,
		  const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

	return READ_ONCE(mc->cache_size);
}

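/* Double the bucket count and rehash all flows into a new instance. */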
static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'flow' from 'table' and then free it. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

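/* sort() comparator: order masks by descending usage count. */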
static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				start = u64_stats_fetch_begin_irq(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry_irq(&stats->syncp,
							   start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries by most used. */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do. */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the mask array in order of usage, most-used masks first. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] = ma->masks[index];
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}

/* Initialize the flow module. */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}
1234