// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_HASH_SHIFT		8
#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

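	/* The memory outside of the 'mask->range' is not set since
	 * further operations on 'dst' only use contents within
	 * 'mask->range'.
	 */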
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

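	/* Initialize the default stat node. */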
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
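	/* We open code this loop to make sure cpu 0 is always considered. */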
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->count = 0;
	new->max = size;

	return new;
}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
	}

	rcu_assign_pointer(tbl->mask_array, new);
	kfree_rcu(old, rcu);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					     MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

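	/* Find the deleted mask in the array; it must be present. */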
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

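	/* Shrink the mask array if it is being used sparsely. */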
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
}

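/* Remove 'mask' from the mask list, after releasing a reference. */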
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
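		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */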
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_array *ma;

	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
					   MC_HASH_ENTRIES,
					   __alignof__(struct mask_cache_entry));
	if (!table->mask_cache)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	kfree(ma);
free_mask_cache:
	free_percpu(table->mask_cache);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow,
				     bool count)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	if (count)
		table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		if (count)
			table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

static void table_instance_destroy(struct flow_table *table,
				   struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti,
						 flow, false);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

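/* No need for locking; this function is called only from the RCU callback
 * or from an error path.
 */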
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	free_percpu(table->mask_cache);
	kfree_rcu(rcu_dereference_raw(table->mask_array), rcu);
	table_instance_destroy(table, ti, ufid_ti, false);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

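	/* Insert the flows into the new table. */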
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

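	/* Make sure the number of hash bytes is a multiple of u32. */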
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

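/* Flow lookup does full lookup on flow table. It starts with
 * a mask from the index passed in *index.
 */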
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *index)
{
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow)
				return flow;
		}
	}

	for (i = 0; i < ma->max; i++) {
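		/* The mask at '*index' has already been checked above. */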
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) {
			*index = i;
			return flow;
		}
	}

	return NULL;
}

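/* mask_cache maps a flow to a probable mask. This cache is not tightly
 * coupled to the mask list, so updates to the mask list can leave stale
 * entries behind; a stale entry only costs a full lookup.
 * The cache is per-CPU, and the skb hash is divided into MC_HASH_SEGS
 * segments, each of which indexes one candidate cache slot.
 */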
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit)
{
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	if (unlikely(!skb_hash)) {
		u32 mask_index = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
	}

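	/* Pre- and post-recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */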
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(tbl->mask_cache);

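	/* Find the cache entry 'ce' to operate on. */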
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (MC_HASH_ENTRIES - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;

		hash >>= MC_HASH_SHIFT;
	}

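	/* Cache miss, do full lookup. */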
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 index = 0;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

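	/* Always called under ovs-mutex. */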
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match)) {
			return flow;
		}
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow,
		  const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

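/* Must be called with OVS mutex held. */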
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow, true);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
	       && (a->range.start == b->range.start)
	       && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

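/* Add 'mask' into the mask list, if it is not yet on the list. */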
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
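		/* Allocate a new mask if none exists. */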
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

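		/* Add the mask to the mask list. */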
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

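/* Must be called with OVS mutex held. */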
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

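	/* Expand table, if necessary, to make room. */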
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

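/* Must be called with OVS mutex held. */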
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

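	/* Expand table, if necessary, to make room. */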
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

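/* Must be called with OVS mutex held. */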
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

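/* Initializes the flow module.
 *
 * Returns zero if successful or a negative error code.
 */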
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

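/* Uninitializes the flow module. */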
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}