#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#include "eswitch.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					  FS_CAP(flow_table_properties_nic_receive.modify_root), \
					  FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					  FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS \
	FS_REQUIRED_CAPS( \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit.modify_root), \
		FS_CAP(flow_table_properties_nic_transmit \
			.identified_miss_table_mode), \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)

#define KERNEL_NIC_PRIO_NUM_LEVELS 5
#define KERNEL_NIC_NUM_PRIOS 1

#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS 1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 1
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
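/* Static description of the default NIC RX steering tree. Each entry reserves
 * a number of leaf priorities and flow-table levels, optionally gated on
 * device capabilities; init_root_tree_recursive() flattens it into real
 * priorities and namespaces at steering init time.
 */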
struct node_caps {
	size_t arr_sz;
	long *caps;
};

static struct init_tree_node {
	enum fs_node_type type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 1,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = FIELD_SIZEOF(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);

static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);
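/* Tree node lifecycle helpers. Every steering object (namespace, prio, flow
 * table, flow group, FTE, rule) embeds a struct fs_node carrying a refcount
 * and an rwsem: del_hw_func destroys the object in firmware, del_sw_func
 * frees the software state once the node is unlinked from the tree.
 */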
static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}
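/* Drop a reference on a node. On the last put, the hardware object is
 * destroyed first, then the node is unlinked from its parent (under the
 * parent's write lock) and its software state is freed; finally the put
 * recurses to the parent node.
 */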
static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
			if (node->del_sw_func)
				node->del_sw_func(node);
			up_write_ref_node(parent_node, locked);
		} else {
			kfree(node);
		}
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}
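/* Deleting a rule (a single destination of an FTE) only records which parts
 * of the FTE must be refreshed in hardware via fte->modify_mask; the actual
 * firmware update or delete is issued later by modify_fte()/del_hw_fte().
 */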
static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = 0;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_simple_remove(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;
	fte->flow_context = spec->flow_context;

	tree_init_node(&fte->node, NULL, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						const void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       const void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);

	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->max_fte = max_fte;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}
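/* Iterate the sub-tree rooted at @root, starting from the sibling list
 * position @start (forward or backward depending on @reverse), and return
 * the first flow table found. Used to locate the table to chain before or
 * after a given priority.
 */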
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse) \
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse) \
	for (pos = list_advance_entry(pos, reverse); \
	     &pos->list != (head); \
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}
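/* Walk up from @prio towards the root namespace, looking in each ancestor for
 * the closest flow table after (!reverse) or before (reverse) this priority.
 */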
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}
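/* Re-point all flow tables of the previous chained priority so that their
 * miss path continues into @ft.
 */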
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}
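/* Rules that forward to the "next priority" table keep a back-reference on
 * that table. When the next table changes (old_next_ft is replaced by
 * new_next_ft), move those rules over and rewrite their destination. Either
 * argument may be NULL only around anchor table creation/destruction, in
 * which case there is nothing to do.
 */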
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft;
	int err = 0;

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	struct mlx5_flow_table *next_ft = NULL;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (ft_attr->level >= fs_prio->num_levels) {
		err = -ENOSPC;
		goto unlock_root;
	}
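	/* The level passed in is relative to the namespace; translate it to an
	 * absolute flow-table level by adding the priority's start level.
	 */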
	ft_attr->level += fs_prio->start_level;
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = find_next_chained_ft(fs_prio);
	err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_flow_table(ft, fs_prio);
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}

struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = max_fte;
	ft_attr.level = level;
	ft_attr.prio = prio;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio = prio;
	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    int prio,
				    int num_flow_table_entries,
				    int max_num_groups,
				    u32 level,
				    u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	if (max_num_groups > num_flow_table_entries)
		return ERR_PTR(-EINVAL);

	ft_attr.max_fte = num_flow_table_entries;
	ft_attr.prio = prio;
	ft_attr.level = level;
	ft_attr.flags = flags;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ft;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;

	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;
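		/* Add the new rule to the FTE's child list. Flow-table
		 * destinations go at the tail; forward-to-next-prio rules
		 * appear to rely on this ordering.
		 */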
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fte->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	struct mlx5_flow_group *fg;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);

	if (group_size == 0)
		group_size = 1;

	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > ft->max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	ft->autogroup.num_groups++;

out:
	return fg;
}

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte,
				  const struct mlx5_flow_context *flow_context,
				  const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
	    fte->flow_context.flow_tag != flow_context->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_context.flow_tag,
			       flow_context->flow_tag);
		return -EEXIST;
	}

	return 0;
}

static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  u32 action,
			  struct mlx5_flow_table *ft)
{
	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	     (dest->ft->level <= ft->level)))
		return false;
	return true;
}

struct match_list {
	struct list_head list;
	struct mlx5_flow_group *g;
};

struct match_list_head {
	struct list_head list;
	struct match_list first;
};

static void free_match_list(struct match_list_head *head)
{
	if (!list_empty(&head->list)) {
		struct match_list *iter, *match_tmp;

		list_del(&head->first.list);
		tree_put_node(&head->first.g->node, false);
		list_for_each_entry_safe(iter, match_tmp, &head->list,
					 list) {
			tree_put_node(&iter->g->node, false);
			list_del(&iter->list);
			kfree(iter);
		}
	}
}

static int build_match_list(struct match_list_head *match_head,
			    struct mlx5_flow_table *ft,
			    const struct mlx5_flow_spec *spec)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (likely(list_empty(&match_head->list))) {
			if (!tree_get_node(&g->node))
				continue;
			match_head->first.g = g;
			list_add_tail(&match_head->first.list,
				      &match_head->list);
			continue;
		}

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			free_match_list(match_head);
			err = -ENOMEM;
			goto out;
		}
		if (!tree_get_node(&g->node)) {
			kfree(curr_match);
			continue;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
out:
	rcu_read_unlock();
	return err;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}

static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  const u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}

static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       const struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64 version;
	int err;

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

search_again_locked:
	version = matched_fgs_get_version(match_head);
	if (flow_act->flags & FLOW_ACT_NO_APPEND)
		goto skip_search;

	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
		if (!fte_tmp)
			continue;
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
		up_write_ref_node(&fte_tmp->node, false);
		tree_put_node(&fte_tmp->node, false);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

skip_search:
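	/* No existing FTE matched (or the search was skipped). Before trying
	 * to insert the new FTE into one of the matched groups, re-check the
	 * table and group versions: if either changed while the groups were
	 * unlocked, a concurrent writer may have added a matching group or
	 * FTE, so retry the lookup (taking write locks this time).
	 */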
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	if (version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		if (!g->node.active)
			continue;

		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);

		err = insert_fte(g, fte);
		if (err) {
			up_write_ref_node(&g->node, false);
			if (err == -ENOSPC)
				continue;
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		up_write_ref_node(&g->node, false);
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node, false);
		tree_put_node(&fte->node, false);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}

static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     const struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list_head match_head;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act->action, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	err = build_match_list(&match_head, ft, spec);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		else
			up_read_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node, false);
		return rule;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node, false);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		goto err_release_fg;
	}

	err = insert_fte(g, fte);
	if (err) {
		kmem_cache_free(steering->ftes_cache, fte);
		goto err_release_fg;
	}

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node, false);
	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
	up_write_ref_node(&fte->node, false);
	tree_put_node(&fte->node, false);
	tree_put_node(&g->node, false);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node, false);
	tree_put_node(&g->node, false);
	return ERR_PTR(err);
}

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_destination gen_dest = {};
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);
	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!fwd_next_prio_supported(ft))
			return ERR_PTR(-EOPNOTSUPP);
		if (num_dest)
			return ERR_PTR(-EINVAL);
		mutex_lock(&root->chain_lock);
		next_ft = find_next_chained_ft(prio);
		if (next_ft) {
			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			gen_dest.ft = next_ft;
			dest = &gen_dest;
			num_dest = 1;
			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			mutex_unlock(&root->chain_lock);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!IS_ERR_OR_NULL(handle) &&
		    (list_empty(&handle->rule[0]->next_ft))) {
			mutex_lock(&next_ft->lock);
			list_add(&handle->rule[0]->next_ft,
				 &next_ft->fwd_rules);
			mutex_unlock(&next_ft->lock);
			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		}
		mutex_unlock(&root->chain_lock);
	}
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);

void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	struct fs_fte *fte;
	int i;
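	/* Lock the FTE for the whole deletion so its hardware state is
	 * updated only once: remove all of the handle's rules under the lock,
	 * then either push the accumulated modify mask to firmware (if
	 * destinations remain) or delete the FTE from hardware and let
	 * tree_put_node() release the software state.
	 */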
	fs_get_obj(fte, handle->rule[0]->node.parent);
	down_write_ref_node(&fte->node, false);
	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node, true);
	if (fte->modify_mask && fte->dests_size) {
		modify_fte(fte);
		up_write_ref_node(&fte->node, false);
	} else {
		del_hw_fte(&fte->node);
		up_write(&fte->node.lock);
		tree_put_node(&fte->node, false);
	}
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);
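/* Assuming the list of flow tables in a priority is sorted by level, return
 * the table right after @ft, or the first table of the next chained priority
 * when @ft is the last one.
 */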
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		qpn = 0;
		err = root->cmds->update_root_ft(root, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}
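/* Unchain @ft before it is destroyed: update the root table if needed, and
 * re-point tables and forward rules that used @ft as their next hop to the
 * table that follows it.
 */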
1966static int disconnect_flow_table(struct mlx5_flow_table *ft)
1967{
1968 struct mlx5_core_dev *dev = get_dev(&ft->node);
1969 struct mlx5_flow_table *next_ft;
1970 struct fs_prio *prio;
1971 int err = 0;
1972
1973 err = update_root_ft_destroy(ft);
1974 if (err)
1975 return err;
1976
1977 fs_get_obj(prio, ft->node.parent);
1978 if (!(list_first_entry(&prio->node.children,
1979 struct mlx5_flow_table,
1980 node.list) == ft))
1981 return 0;
1982
1983 next_ft = find_next_chained_ft(prio);
1984 err = connect_fwd_rules(dev, next_ft, ft);
1985 if (err)
1986 return err;
1987
1988 err = connect_prev_fts(dev, next_ft, prio);
1989 if (err)
1990 mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
1991 ft->id);
1992 return err;
1993}
1994
1995int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
1996{
1997 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1998 int err = 0;
1999
2000 mutex_lock(&root->chain_lock);
2001 err = disconnect_flow_table(ft);
2002 if (err) {
2003 mutex_unlock(&root->chain_lock);
2004 return err;
2005 }
2006 if (tree_remove_node(&ft->node, false))
2007 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2008 ft->id);
2009 mutex_unlock(&root->chain_lock);
2010
2011 return err;
2012}
2013EXPORT_SYMBOL(mlx5_destroy_flow_table);
2014
2015void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2016{
2017 if (tree_remove_node(&fg->node, false))
2018 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2019 fg->id);
2020}
2021
2022struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2023 int n)
2024{
2025 struct mlx5_flow_steering *steering = dev->priv.steering;
2026
2027 if (!steering || !steering->fdb_sub_ns)
2028 return NULL;
2029
2030 return steering->fdb_sub_ns[n];
2031}
2032EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2033
2034struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2035 enum mlx5_flow_namespace_type type)
2036{
2037 struct mlx5_flow_steering *steering = dev->priv.steering;
2038 struct mlx5_flow_root_namespace *root_ns;
2039 int prio = 0;
2040 struct fs_prio *fs_prio;
2041 struct mlx5_flow_namespace *ns;
2042
2043 if (!steering)
2044 return NULL;
2045
2046 switch (type) {
2047 case MLX5_FLOW_NAMESPACE_FDB:
2048 if (steering->fdb_root_ns)
2049 return &steering->fdb_root_ns->ns;
2050 return NULL;
2051 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2052 if (steering->sniffer_rx_root_ns)
2053 return &steering->sniffer_rx_root_ns->ns;
2054 return NULL;
2055 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2056 if (steering->sniffer_tx_root_ns)
2057 return &steering->sniffer_tx_root_ns->ns;
2058 return NULL;
2059 case MLX5_FLOW_NAMESPACE_RDMA_RX:
2060 if (steering->rdma_rx_root_ns)
2061 return &steering->rdma_rx_root_ns->ns;
2062 return NULL;
2063 default:
2064 break;
2065 }
2066
2067 if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
2068 root_ns = steering->egress_root_ns;
2069 } else {
2070 root_ns = steering->root_ns;
2071 prio = type;
2072 }
2073
2074 if (!root_ns)
2075 return NULL;
2076
2077 fs_prio = find_prio(&root_ns->ns, prio);
2078 if (!fs_prio)
2079 return NULL;
2080
2081 ns = list_first_entry(&fs_prio->node.children,
2082 typeof(*ns),
2083 node.list);
2084
2085 return ns;
2086}
2087EXPORT_SYMBOL(mlx5_get_flow_namespace);
2088
2089struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2090 enum mlx5_flow_namespace_type type,
2091 int vport)
2092{
2093 struct mlx5_flow_steering *steering = dev->priv.steering;
2094
2095 if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
2096 return NULL;
2097
2098 switch (type) {
2099 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2100 if (steering->esw_egress_root_ns &&
2101 steering->esw_egress_root_ns[vport])
2102 return &steering->esw_egress_root_ns[vport]->ns;
2103 else
2104 return NULL;
2105 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2106 if (steering->esw_ingress_root_ns &&
2107 steering->esw_ingress_root_ns[vport])
2108 return &steering->esw_ingress_root_ns[vport]->ns;
2109 else
2110 return NULL;
2111 default:
2112 return NULL;
2113 }
2114}
2115
static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
				       unsigned int prio,
				       int num_levels,
				       enum fs_node_type type)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = type;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
					      unsigned int prio,
					      int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
}

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						      *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
{
	struct mlx5_flow_namespace *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

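/* Create @prio_metadata->num_leaf_prios consecutive leaf priorities under
 * @ns, starting at priority index @prio. Each leaf priority gets the number
 * of levels described by the init tree node.
 */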
static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
			offset / 32)) >> \
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}

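/* Walk one node of the static init tree and create the matching software
 * object: a priority (or a run of leaf priorities) for a prio node, or a
 * namespace for a namespace node. Prio nodes are skipped when the device
 * lacks the required capabilities or flow table levels. Children are then
 * visited recursively, with the prio index advanced past any leaf
 * priorities created by earlier siblings.
 */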
static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int i;
	struct mlx5_flow_namespace *fs_ns;
	int err;

	fs_get_obj(fs_ns, fs_parent_node);
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       &fs_ns->node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}

static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);

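	/* Create the root namespace */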
	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, NULL);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

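/* set_prio_attrs_in_ns() and set_prio_attrs_in_prio() recurse into each
 * other to assign every priority its start_level and, when it was left as 0
 * in the init tree, the number of levels actually consumed by its
 * descendants. Levels accumulate depth-first so sibling priorities never
 * overlap.
 */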
static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio)
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}

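/* The anchor is a single, empty flow table created in the dedicated ANCHOR
 * namespace at the very end of the NIC RX tree, so that earlier tables
 * always have a valid table to point their miss path at.
 */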
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level = ANCHOR_LEVEL;
	ft_attr.prio = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

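/* Build the NIC RX steering tree: create the root namespace, instantiate the
 * static root_fs layout, compute priority levels and finally create the
 * anchor table at the bottom of the tree.
 */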
static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}

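/* Recursively tear down a steering sub-tree. Each node is pinned with
 * tree_get_node() while its children are cleaned, then released; the final
 * tree_remove_node() drops the reference taken when the node was created.
 */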
static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node, false);
		tree_remove_node(node, false);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
}

static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
}

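/* Release every steering root namespace that mlx5_init_fs() may have
 * created, along with the flow counter statistics and the flow group/FTE
 * caches. Roots that were never initialized are fine: cleanup_root_ns()
 * ignores NULL.
 */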
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_egress_acls_root_ns(dev);
	cleanup_ingress_acls_root_ns(dev);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
}

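/* The sniffer TX/RX and RDMA RX roots are minimal: a single priority with
 * one level each. The RDMA RX root additionally overrides the default miss
 * action with MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN.
 */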
static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
	if (!steering->rdma_rx_root_ns)
		return -ENOMEM;

	steering->rdma_rx_root_ns->def_miss_action =
		MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN;

	prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}
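
/* Build the FDB (eswitch) steering tree: a bypass priority, a chained
 * fast-path priority subdivided into one sub-namespace per chain (each with
 * its own priorities, exposed through mlx5_get_fdb_sub_ns()), and a final
 * slow-path priority.
 */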
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *maj_prio;
	struct fs_prio *min_prio;
	int levels;
	int chain;
	int prio;
	int err;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) *
				       (FDB_MAX_CHAIN + 1), GFP_KERNEL);
	if (!steering->fdb_sub_ns)
		return -ENOMEM;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
				  1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
					  FDB_FAST_PATH,
					  levels);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
		ns = fs_create_namespace(maj_prio);
		if (IS_ERR(ns)) {
			err = PTR_ERR(ns);
			goto out_err;
		}

		for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
			min_prio = fs_create_prio(ns, prio, 2);
			if (IS_ERR(min_prio)) {
				err = PTR_ERR(min_prio);
				goto out_err;
			}
		}

		steering->fdb_sub_ns[chain] = ns;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	steering->fdb_root_ns = NULL;
	return err;
}

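/* Each eswitch vport gets its own egress/ingress ACL root namespace holding
 * a single priority of one level.
 */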
static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

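/* Allocate the per-vport ACL root arrays and initialize one root per vport,
 * unwinding any roots already created on failure.
 */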
static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int total_vports = mlx5_eswitch_get_total_vports(dev);
	int err;
	int i;

	steering->esw_egress_root_ns =
		kcalloc(total_vports,
			sizeof(*steering->esw_egress_root_ns),
			GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
	return err;
}

static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int total_vports = mlx5_eswitch_get_total_vports(dev);
	int err;
	int i;

	steering->esw_ingress_root_ns =
		kcalloc(total_vports,
			sizeof(*steering->esw_ingress_root_ns),
			GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
	return err;
}

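/* Build the NIC TX (egress) steering tree from the static egress_root_fs
 * layout.
 */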
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &egress_root_fs,
			     &steering->egress_root_ns->ns.node);
	if (err)
		goto cleanup;
	set_prio_attrs(steering->egress_root_ns);
	return 0;
cleanup:
	cleanup_root_ns(steering->egress_root_ns);
	steering->egress_root_ns = NULL;
	return err;
}

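/* Top-level flow steering init: set up flow counter statistics, allocate the
 * steering context and its flow group/FTE caches, then create each root
 * namespace (NIC RX, FDB, vport ACLs, sniffer, RDMA RX, NIC TX) only when
 * the corresponding device capability is reported. Any failure tears down
 * whatever was already created via mlx5_cleanup_fs().
 */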
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			err = init_egress_acls_root_ns(dev);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acls_root_ns(dev);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
		err = init_rdma_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}

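/* Attach an underlay QPN to the current NIC RX root flow table and keep it
 * on the root's underlay_qpns list; chain_lock serializes this against root
 * table changes.
 */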
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);

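/* Reverse of mlx5_fs_add_rx_underlay_qpn(): detach the QPN from the root
 * flow table and drop it from the underlay_qpns list. The list entry is
 * removed even if the firmware update fails; the failure is only warned
 * about.
 */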
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
