/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
	.def_miss_action = def_miss_act,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS \
	FS_REQUIRED_CAPS( \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit.modify_root), \
		FS_CAP(flow_table_properties_nic_transmit \
			       .identified_miss_table_mode), \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define FS_CHAINING_CAPS_RDMA_TX \
	FS_REQUIRED_CAPS( \
		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
		FS_CAP(flow_table_properties_nic_transmit_rdma \
			       .identified_miss_table_mode), \
		FS_CAP(flow_table_properties_nic_transmit_rdma \
			       .flow_table_modify))
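
/* How the init_tree macros above compose (example; <min_level> is an
 * illustrative placeholder): a chained priority holding two leaf
 * priorities of one level each is declared as
 *
 *	ADD_PRIO(0, <min_level>, 0, FS_CHAINING_CAPS,
 *		 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
 *			ADD_MULTIPLE_PRIO(2, 1)))
 *
 * ar_size is always derived from the variadic children list via
 * INIT_TREE_NODE_ARRAY_SIZE(), so the count and the array cannot go
 * out of sync.
 */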

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 7
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 2
#define OFFLOADS_NUM_PRIOS 2
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)

#define KERNEL_TX_IPSEC_NUM_PRIOS  1
#define KERNEL_TX_IPSEC_NUM_LEVELS 1
#define KERNEL_TX_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)

struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
	enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]){
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
						  OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
						  KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
						  LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
						  ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
#ifdef CONFIG_MLX5_IPSEC
	.ar_size = 2,
#else
	.ar_size = 1,
#endif
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
#ifdef CONFIG_MLX5_IPSEC
		ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
						  KERNEL_TX_IPSEC_NUM_LEVELS))),
#endif
	}
};

enum {
	RDMA_RX_COUNTERS_PRIO,
	RDMA_RX_BYPASS_PRIO,
	RDMA_RX_KERNEL_PRIO,
};

#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)

static struct init_tree_node rdma_rx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 3,
	.children = (struct init_tree_node[]) {
		[RDMA_RX_COUNTERS_PRIO] =
		ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
						  RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
		[RDMA_RX_BYPASS_PRIO] =
		ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		[RDMA_RX_KERNEL_PRIO] =
		ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
				ADD_MULTIPLE_PRIO(1, 1))),
	}
};

enum {
	RDMA_TX_COUNTERS_PRIO,
	RDMA_TX_BYPASS_PRIO,
};

#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)

static struct init_tree_node rdma_tx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 2,
	.children = (struct init_tree_node[]) {
		[RDMA_TX_COUNTERS_PRIO] =
		ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
						  RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
		[RDMA_TX_BYPASS_PRIO] =
		ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_RDMA_TX,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = sizeof_field(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = sizeof_field(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};
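
/* Two-level hashing scheme: flow groups are hashed per table by their
 * match mask (rhash_fg), and FTEs are hashed per group by their match
 * value (rhash_fte).  Rule insertion therefore resolves the candidate
 * groups and then the exact entry with O(1) lookups rather than linear
 * scans.
 */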

static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Delete rule (destination) is special case that
 * requires to lock the FTE for all the deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}

static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
		}
		node->del_sw_func(node);
		if (parent_node)
			up_write_ref_node(parent_node, locked);
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}
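
/* Lifetime rules for the node tree above: tree_add_node() takes a
 * reference on the parent, and tree_put_node() drops one reference,
 * running the del_hw callback before the del_sw callback on the last
 * put and then releasing the parent in turn.  The nested down/up
 * helpers pin a node (refcount) for as long as its rwsem is held.
 */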

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool is_fwd_next_action(u32 action)
{
	return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	if (ft->node.parent) {
		fs_get_obj(prio, ft->node.parent);
		prio->num_ft--;
	}
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}

static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (is_fwd_next_action(rule->sw_action)) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
	    --fte->dests_size) {
		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = false;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active &&
	    fg->max_ftes == ft->autogroup.group_size &&
	    fg->start_index < ft->autogroup.max_fte)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_free(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;
	fte->flow_context = spec->flow_context;

	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						const void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       const void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start(closest from right), else we search for the
 * last flow table in the root sub-tree from start(closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false then return the first flow table in next priority of
 * prio in the tree, else return the last flow table in the previous priority
 * of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
						struct mlx5_flow_act *flow_act)
{
	struct fs_prio *prio;
	bool next_ns;

	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);

	return find_next_chained_ft(prio);
}

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int err;

	fs_for_each_ft(iter, prio) {
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_err(dev,
				      "Failed to modify flow table id %d, type %d, err %d\n",
				      iter->id, iter->type, err);
			/* The driver is out of sync with the FW */
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
		    iter->ft->ns == new_next_ft->ns)
			continue;

		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft, *first_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	first_ft = list_first_entry_or_null(&prio->node.children,
					    struct mlx5_flow_table, node.list);
	if (!first_ft || first_ft->level > ft->level) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
	struct mlx5_flow_table *next_ft;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (!unmanaged) {
		/* The level is related to the
		 * priority level range.
		 */
		if (ft_attr->level >= fs_prio->num_levels) {
			err = -ENOSPC;
			goto unlock_root;
		}

		ft_attr->level += fs_prio->start_level;
	}

	/* The alloc_flow_table creates the unmanaged node beyond the root
	 * namespace anyway.
	 */
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	next_ft = unmanaged ? ft_attr->next_ft :
			      find_next_chained_ft(fs_prio);
	ft->def_miss_action = ns->def_miss_action;
	ft->ns = ns;
	err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
	if (err)
		goto free_ft;

	if (!unmanaged) {
		err = connect_flow_table(root->dev, ft, fs_prio);
		if (err)
			goto destroy_ft;
	}

	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	if (!unmanaged) {
		tree_add_node(&ft->node, &fs_prio->node);
		list_add_flow_table(ft, fs_prio);
	} else {
		ft->node.root = fs_prio->node.root;
	}
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	rhltable_destroy(&ft->fgs_hash);
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
EXPORT_SYMBOL(mlx5_create_flow_table);
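
/* Minimal usage sketch (values are illustrative, not from this file):
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_table *ft;
 *
 *	ft_attr.prio = 0;
 *	ft_attr.level = 0;
 *	ft_attr.max_fte = 16;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 *
 * where "ns" is a namespace previously obtained from
 * mlx5_get_flow_namespace().
 */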

struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio = prio;
	ft_attr.max_fte = 1;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

#define MAX_FLOW_GROUP_SIZE BIT(24)
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    struct mlx5_flow_table_attr *ft_attr)
{
	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
	int max_num_groups = ft_attr->autogroup.max_num_groups;
	struct mlx5_flow_table *ft;
	int autogroups_max_fte;

	ft = mlx5_create_flow_table(ns, ft_attr);
	if (IS_ERR(ft))
		return ft;

	autogroups_max_fte = ft->max_fte - num_reserved_entries;
	if (max_num_groups > autogroups_max_fte)
		goto err_validate;
	if (num_reserved_entries > ft->max_fte)
		goto err_validate;

	/* Align the number of groups according to the largest group size */
	if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
		max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;
	ft->autogroup.max_fte = autogroups_max_fte;
	/* We save place for flow groups in addition to max types */
	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);

	return ft;

err_validate:
	mlx5_destroy_flow_table(ft);
	return ERR_PTR(-ENOSPC);
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
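
/* Usage sketch (values are illustrative): an auto-grouped table lets
 * mlx5_add_flow_rules() carve flow groups out of the table on demand,
 * instead of the caller creating them with mlx5_create_flow_group():
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *
 *	ft_attr.prio = 0;
 *	ft_attr.max_fte = 64;
 *	ft_attr.autogroup.max_num_groups = 4;
 *	ft_attr.autogroup.num_reserved_entries = 2;
 *	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
 *
 * With these numbers group_size becomes (64 - 2) / (4 + 1) = 12, and
 * the last num_reserved_entries indices stay available for explicitly
 * created groups.
 */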

struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}
EXPORT_SYMBOL(mlx5_create_flow_group);
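
/* Callers build fg_in with the create_flow_group_in layout; a minimal
 * sketch (field values illustrative):
 *
 *	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 *	u32 *in = kvzalloc(inlen, GFP_KERNEL);
 *
 *	if (!in)
 *		return -ENOMEM;
 *	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
 *		 MLX5_MATCH_OUTER_HEADERS);
 *	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
 *	MLX5_SET(create_flow_group_in, in, end_flow_index, 3);
 *	fg = mlx5_create_flow_group(ft, in);
 *	kvfree(in);
 *
 * create_auto_flow_group() below follows the same pattern internally.
 */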

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Keep flow table destinations at the tail of the dest
		 * list; other destination types are added at the head.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fg->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	u32 max_fte = ft->autogroup.max_fte;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;
	struct mlx5_flow_group *fg;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->autogroup.group_size;

	/* group_size == 0 means no auto grouping */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	if (group_size == ft->autogroup.group_size)
		ft->autogroup.num_groups++;

out:
	return fg;
}

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		      d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.pkt_reformat->id ==
		       d2->vport.pkt_reformat->id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
		     d1->sampler_id == d2->sampler_id))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte,
				  const struct mlx5_flow_context *flow_context,
				  const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
	    fte->flow_context.flow_tag != flow_context->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_context.flow_tag,
			       flow_context->flow_tag);
		return -EEXIST;
	}

	return 0;
}

static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  struct mlx5_flow_act *flow_act,
			  struct mlx5_flow_table *ft)
{
	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
	u32 action = flow_act->action;

	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (ignore_level) {
		if (ft->type != FS_FT_FDB &&
		    ft->type != FS_FT_NIC_RX)
			return false;

		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		    ft->type != dest->ft->type)
			return false;
	}

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level && !ignore_level)))
		return false;
	return true;
}

struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

static void free_match_list(struct match_list *head, bool ft_locked)
{
	struct match_list *iter, *match_tmp;

	list_for_each_entry_safe(iter, match_tmp, &head->list,
				 list) {
		tree_put_node(&iter->g->node, ft_locked);
		list_del(&iter->list);
		kfree(iter);
	}
}

static int build_match_list(struct match_list *match_head,
			    struct mlx5_flow_table *ft,
			    const struct mlx5_flow_spec *spec,
			    struct mlx5_flow_group *fg,
			    bool ft_locked)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which has a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FTE commands */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (fg && fg != g)
			continue;

		if (unlikely(!tree_get_node(&g->node)))
			continue;

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			rcu_read_unlock();
			free_match_list(match_head, ft_locked);
			return -ENOMEM;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
	rcu_read_unlock();
	return err;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}

static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  const u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}

static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       const struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64 version = 0;
	int err;

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

search_again_locked:
	if (flow_act->flags & FLOW_ACT_NO_APPEND)
		goto skip_search;
	version = matched_fgs_get_version(match_head);
	/* Try to find an fte with identical match value and attempt update
	 * its action.
	 */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
		if (!fte_tmp)
			continue;
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);

		up_write_ref_node(&fte_tmp->node, false);
		tree_put_node(&fte_tmp->node, false);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

skip_search:
	/* No group with matching fte found, or we skipped the search.
	 * Try to add a new fte to any matching fg.
	 */

	/* Check the ft version, for case that new flow group
	 * was added while the fgs weren't locked
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version. If version have changed it could be that an
	 * FTE with the same match value was added while the fgs weren't
	 * locked.
	 */
	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
	    version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);

		if (!g->node.active) {
			up_write_ref_node(&g->node, false);
			continue;
		}

		err = insert_fte(g, fte);
		if (err) {
			up_write_ref_node(&g->node, false);
			if (err == -ENOSPC)
				continue;
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		up_write_ref_node(&g->node, false);
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node, false);
		if (IS_ERR(rule))
			tree_put_node(&fte->node, false);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}

static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     const struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_handle *rule;
	struct match_list match_head;
	struct mlx5_flow_group *g;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	if (flow_act->fg && ft->autogroup.active)
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which has a matching match_criteria */
	err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		else
			up_read_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head, take_write);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node, false);
		return rule;
	}

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte)) {
		up_write_ref_node(&ft->node, false);
		err = PTR_ERR(fte);
		goto err_alloc_fte;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node, false);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	err = insert_fte(g, fte);
	if (err)
		goto err_release_fg;

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node, false);
	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
	up_write_ref_node(&fte->node, false);
	if (IS_ERR(rule))
		tree_put_node(&fte->node, false);
	tree_put_node(&g->node, false);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node, false);
	kmem_cache_free(steering->ftes_cache, fte);
err_alloc_fte:
	tree_put_node(&g->node, false);
	return ERR_PTR(err);
}

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	static const struct mlx5_flow_spec zero_spec = {};
	struct mlx5_flow_destination *gen_dest = NULL;
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	int i;

	if (!spec)
		spec = &zero_spec;

	if (!is_fwd_next_action(sw_action))
		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (!fwd_next_prio_supported(ft))
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&root->chain_lock);
	next_ft = find_next_fwd_ft(ft, flow_act);
	if (!next_ft) {
		handle = ERR_PTR(-EOPNOTSUPP);
		goto unlock;
	}

	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
			   GFP_KERNEL);
	if (!gen_dest) {
		handle = ERR_PTR(-ENOMEM);
		goto unlock;
	}
	for (i = 0; i < num_dest; i++)
		gen_dest[i] = dest[i];
	gen_dest[i].type =
		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	gen_dest[i].ft = next_ft;
	dest = gen_dest;
	num_dest++;
	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
	if (IS_ERR(handle))
		goto unlock;

	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
		mutex_lock(&next_ft->lock);
		list_add(&handle->rule[num_dest - 1]->next_ft,
			 &next_ft->fwd_rules);
		mutex_unlock(&next_ft->lock);
		handle->rule[num_dest - 1]->sw_action = sw_action;
		handle->rule[num_dest - 1]->ft = ft;
	}
unlock:
	mutex_unlock(&root->chain_lock);
	kfree(gen_dest);
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
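
/* Usage sketch (illustrative; field values are not from this file):
 * steer TCP traffic in an auto-grouped NIC RX table to a TIR.
 *
 *	struct mlx5_flow_act flow_act = { .action =
 *		MLX5_FLOW_CONTEXT_ACTION_FWD_DEST };
 *	struct mlx5_flow_destination dest = {};
 *	struct mlx5_flow_spec *spec;
 *	struct mlx5_flow_handle *rule;
 *
 *	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *	if (!spec)
 *		return -ENOMEM;
 *	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 *	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 *			 outer_headers.ip_protocol);
 *	MLX5_SET(fte_match_param, spec->match_value,
 *		 outer_headers.ip_protocol, IPPROTO_TCP);
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 *	dest.tir_num = tirn;	(caller-provided TIR number)
 *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	kvfree(spec);
 *
 * A NULL spec is also accepted and is treated as match-all (see
 * zero_spec above).
 */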

void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	struct fs_fte *fte;
	int i;

	/* In order to consolidate the HW changes we lock the FTE for other
	 * changes, and increase its refcount, in order not to perform the
	 * "del" functions of the FTE. Will handle them here.
	 * The removal of the rules is done under locked FTE.
	 * After removing all the handle's rules, if there are remaining
	 * rules, it means we just need to modify the FTE in FW, and
	 * unlock/decrease the refcount we increased before.
	 * Otherwise, it means the FTE should be deleted. First delete the
	 * FTE in FW. Caller of this function needs to make sure fte can't be
	 * deleted or modified by this function. The del_hw callback is then
	 * cleared to avoid a second HW delete of the same FTE.
	 */
	fs_get_obj(fte, handle->rule[0]->node.parent);
	down_write_ref_node(&fte->node, false);
	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node, true);
	if (fte->dests_size) {
		if (fte->modify_mask)
			modify_fte(fte);
		up_write_ref_node(&fte->node, false);
	} else if (list_empty(&fte->node.children)) {
		del_hw_fte(&fte->node);
2080
2081 fte->node.del_hw_func = NULL;
2082 up_write_ref_node(&fte->node, false);
2083 tree_put_node(&fte->node, false);
2084 } else {
2085 up_write_ref_node(&fte->node, false);
2086 }
2087 kfree(handle);
2088}
2089EXPORT_SYMBOL(mlx5_del_flow_rules);
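
/* Lifetime sketch (illustrative only): rules are reference-counted against
 * their FTE, so deletion is simply the inverse of addition; "handle" below is
 * assumed to come from a successful mlx5_add_flow_rules() call.
 *
 *	handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	mlx5_del_flow_rules(handle);
 */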

/* Assuming prio->node.children(flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}

/* Connect flow table from previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	/* Only the first table in a priority is linked from the previous
	 * priority; for any other table there is nothing to reconnect.
	 */
	fs_get_obj(prio, ft->node.parent);
	if (!(list_first_entry(&prio->node.children,
			       struct mlx5_flow_table,
			       node.list) == ft))
		return 0;

	next_ft = find_next_ft(ft);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
		err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node, false))
		mlx5_core_warn(get_dev(&ft->node),
			       "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);
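
/* Usage sketch (illustrative only): tables are created against a namespace
 * and torn down through this symmetric call; the attribute values below are
 * made up for the example.
 *
 *	struct mlx5_flow_table_attr ft_attr = {
 *		.prio = 0,
 *		.max_fte = 64,	// hypothetical table size
 *	};
 *	struct mlx5_flow_table *ft;
 *
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 *	...
 *	mlx5_destroy_flow_table(ft);
 */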

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node, false))
		mlx5_core_warn(get_dev(&fg->node),
			       "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}
EXPORT_SYMBOL(mlx5_destroy_flow_group);

struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
						int n)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || !steering->fdb_sub_ns)
		return NULL;

	return steering->fdb_sub_ns[n];
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);

static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
{
	switch (type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
	case MLX5_FLOW_NAMESPACE_LAG:
	case MLX5_FLOW_NAMESPACE_OFFLOADS:
	case MLX5_FLOW_NAMESPACE_ETHTOOL:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
	case MLX5_FLOW_NAMESPACE_ANCHOR:
		return true;
	default:
		return false;
	}
}

struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio = 0;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		if (steering->port_sel_root_ns)
			return &steering->port_sel_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
		root_ns = steering->fdb_root_ns;
		prio = FDB_BYPASS_PATH;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
	case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
		root_ns = steering->egress_root_ns;
		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_BYPASS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_KERNEL_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		root_ns = steering->rdma_tx_root_ns;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_COUNTERS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
		root_ns = steering->rdma_tx_root_ns;
		prio = RDMA_TX_COUNTERS_PRIO;
		break;
	default: /* Must be NIC RX */
		WARN_ON(!is_nic_rx_ns(type));
		root_ns = steering->root_ns;
		prio = type;
		break;
	}

	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
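
/* Usage sketch (illustrative only): most callers resolve a namespace by type
 * and then create tables under it; the kernel NIC RX namespace is the common
 * case.
 *
 *	struct mlx5_flow_namespace *ns;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 */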

struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
							      enum mlx5_flow_namespace_type type,
							      int vport)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (vport >= steering->esw_egress_acl_vports)
			return NULL;
		if (steering->esw_egress_root_ns &&
		    steering->esw_egress_root_ns[vport])
			return &steering->esw_egress_root_ns[vport]->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (vport >= steering->esw_ingress_acl_vports)
			return NULL;
		if (steering->esw_ingress_root_ns &&
		    steering->esw_ingress_root_ns[vport])
			return &steering->esw_ingress_root_ns[vport]->ns;
		else
			return NULL;
	default:
		return NULL;
	}
}

static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
				       unsigned int prio,
				       int num_levels,
				       enum fs_node_type type)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = type;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
					      unsigned int prio,
					      int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
}

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
							int def_miss_act)
{
	struct mlx5_flow_namespace *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	ns->def_miss_action = def_miss_act;
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \
			offset / 32)) >> \
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}
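
/* Worked example of the extraction above (for illustration): FS_CAP() yields
 * a bit offset into the flow-table capability block. For, say, offset 35:
 * 35 / 32 = 1 selects the second big-endian dword, 35 & 0x1f = 3 is the bit
 * index within it, and the shift by 32 - 1 - 3 = 28 followed by the mask of
 * FLOW_TABLE_BIT_SZ (0x1) isolates that single capability bit.
 */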

static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int err;
	int i;

	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       fs_parent_node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}

static void del_sw_root_ns(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	fs_get_obj(ns, node);
	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
	mutex_destroy(&root_ns->chain_lock);
	kfree(node);
}

static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);

	/* Create the root namespace */
	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, del_sw_root_ns);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio) {
		/* This updates start_level and num_levels of ns's priority
		 * descendants.
		 */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);

		/* If this is a prio with chains, it is possible to jump from
		 * one chain (namespace) to another, so the levels accumulate.
		 */
		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
			acc_level = acc_level_ns;
	}

	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}
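
/* Worked example (illustrative): with two priorities of num_levels 2 and 3
 * directly under a root namespace, set_prio_attrs() assigns start_level 0 to
 * the first and start_level 2 to the second, so tables in the second prio
 * occupy hardware levels 2..4 and always sit below tables of the first.
 */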

#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level = ANCHOR_LEVEL;
	ft_attr.prio = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node, false);
		tree_remove_node(node, false);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

#define PORT_SEL_NUM_LEVELS 3
static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
	if (!steering->port_sel_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
			      PORT_SEL_NUM_LEVELS);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
	if (!steering->rdma_rx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_rx_root_fs,
			     &steering->rdma_rx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_rx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_rx_root_ns);
	steering->rdma_rx_root_ns = NULL;
	return err;
}

static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
	if (!steering->rdma_tx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_tx_root_fs,
			     &steering->rdma_tx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_tx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_tx_root_ns);
	steering->rdma_tx_root_ns = NULL;
	return err;
}

/* FT and tc chains are stored in the same array so we can re-use the
 * mlx5_get_fdb_sub_ns() and tc api for FT chains.
 * When creating a new ns for each chain store it in the first available slot.
 * Assume tc chains are created and stored first and only then the FT chain.
 */
static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct mlx5_flow_namespace *ns)
{
	int chain = 0;

	while (steering->fdb_sub_ns[chain])
		++chain;

	steering->fdb_sub_ns[chain] = ns;
}

static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct fs_prio *maj_prio)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *min_prio;
	int prio;

	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
		if (IS_ERR(min_prio))
			return PTR_ERR(min_prio);
	}

	store_fdb_sub_ns_prio_chain(steering, ns);

	return 0;
}

static int create_fdb_chains(struct mlx5_flow_steering *steering,
			     int fs_prio,
			     int chains)
{
	struct fs_prio *maj_prio;
	int levels;
	int chain;
	int err;

	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
					  fs_prio,
					  levels);
	if (IS_ERR(maj_prio))
		return PTR_ERR(maj_prio);

	for (chain = 0; chain < chains; chain++) {
		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
		if (err)
			return err;
	}

	return 0;
}

static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
{
	int err;

	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
				       sizeof(*steering->fdb_sub_ns),
				       GFP_KERNEL);
	if (!steering->fdb_sub_ns)
		return -ENOMEM;

	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
	if (err)
		return err;

	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
	if (err)
		return err;

	return 0;
}

static int create_fdb_bypass(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *prio;
	int i;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
	if (IS_ERR(prio))
		return PTR_ERR(prio);

	ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
		prio = fs_create_prio(ns, i, 1);
		if (IS_ERR(prio))
			return PTR_ERR(prio);
	}
	return 0;
}

static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *maj_prio;
	int err;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	err = create_fdb_bypass(steering);
	if (err)
		goto out_err;

	err = create_fdb_fast_path(steering);
	if (err)
		goto out_err;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	/* We put this priority last, knowing that nothing will get here
	 * unless explicitly forwarded to. This is possible because the
	 * slow path tables have catch all rules and nothing gets passed
	 * past them.
	 */
	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	steering->fdb_root_ns = NULL;
	return err;
}

static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	/* Create a single prio */
	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	/* Create a single prio */
	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_egress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_egress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}
	steering->esw_egress_acl_vports = total_vports;
	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
	return err;
}

void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < steering->esw_egress_acl_vports; i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
}

int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_ingress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_ingress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}
	steering->esw_ingress_acl_vports = total_vports;
	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
	return err;
}

void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < steering->esw_ingress_acl_vports; i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
}

u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, type);
	if (!ns)
		return 0;

	root = find_root(&ns->node);
	if (!root)
		return 0;

	return root->cmds->get_capabilities(root, root->table_type);
}
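
/* Usage sketch (illustrative only): callers probe for steering features
 * before relying on them; the capability bit below is just an example.
 *
 *	if (mlx5_fs_get_capabilities(dev, MLX5_FLOW_NAMESPACE_FDB) &
 *	    MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX)
 *		// VLAN push on RX is assumed usable in this namespace
 */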

static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &egress_root_fs,
			     &steering->egress_root_ns->ns.node);
	if (err)
		goto cleanup;
	set_prio_attrs(steering->egress_root_ns);
	return 0;
cleanup:
	cleanup_root_ns(steering->egress_root_ns);
	steering->egress_root_ns = NULL;
	return err;
}

void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->port_sel_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->rdma_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
}

int mlx5_fs_core_init(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err = 0;

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
		err = init_port_sel_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
		err = init_rdma_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
		err = init_rdma_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
	    MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;

err:
	mlx5_fs_core_cleanup(dev);
	return err;
}

void mlx5_fs_core_free(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
	mlx5_ft_pool_destroy(dev);
	mlx5_cleanup_fc_stats(dev);
}

int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	err = mlx5_ft_pool_init(dev);
	if (err)
		goto err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering) {
		err = -ENOMEM;
		goto err;
	}

	steering->dev = dev;
	dev->priv.steering = steering;

	if (mlx5_fs_dr_is_supported(dev))
		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	mlx5_fs_core_free(dev);
	return err;
}

int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
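
/* Usage sketch (illustrative only): an IPoIB-style consumer registers its
 * underlay QP after creating it and removes it before destroying the QP, so
 * the root flow table always points at live QPs. "qp" is hypothetical.
 *
 *	err = mlx5_fs_add_rx_underlay_qpn(dev, qp->qpn);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_fs_remove_rx_underlay_qpn(dev, qp->qpn);
 */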

int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);

static struct mlx5_flow_root_namespace
*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_flow_namespace *ns;

	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
	else
		ns = mlx5_get_flow_namespace(dev, ns_type);
	if (!ns)
		return NULL;

	return find_root(&ns->node);
}

struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 u8 ns_type, u8 num_actions,
						 void *modify_actions)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
	if (!modify_hdr)
		return ERR_PTR(-ENOMEM);

	modify_hdr->ns_type = ns_type;
	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
					      modify_actions, modify_hdr);
	if (err) {
		kfree(modify_hdr);
		return ERR_PTR(err);
	}

	return modify_hdr;
}
EXPORT_SYMBOL(mlx5_modify_header_alloc);
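
/* Usage sketch (illustrative only): modify_actions is an array of
 * set_action_in-formatted entries. A single SET of the outer IPv4 TTL might
 * look roughly like the following; the field/offset/length values are
 * assumptions for the example, not taken from this file.
 *
 *	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
 *
 *	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
 *	MLX5_SET(set_action_in, action, field,
 *		 MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
 *	MLX5_SET(set_action_in, action, offset, 0);
 *	MLX5_SET(set_action_in, action, length, 8);
 *	MLX5_SET(set_action_in, action, data, 64);
 *
 *	modify_hdr = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
 *					      1, action);
 *	if (IS_ERR(modify_hdr))
 *		return PTR_ERR(modify_hdr);
 */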

void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, modify_hdr->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->modify_header_dealloc(root, modify_hdr);
	kfree(modify_hdr);
}
EXPORT_SYMBOL(mlx5_modify_header_dealloc);

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
						     struct mlx5_pkt_reformat_params *params,
						     enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_flow_root_namespace *root;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
	if (!pkt_reformat)
		return ERR_PTR(-ENOMEM);

	pkt_reformat->ns_type = ns_type;
	pkt_reformat->reformat_type = params->type;
	err = root->cmds->packet_reformat_alloc(root, params, ns_type,
						pkt_reformat);
	if (err) {
		kfree(pkt_reformat);
		return ERR_PTR(err);
	}

	return pkt_reformat;
}
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
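
/* Usage sketch (illustrative only): an encap caller fills params with a
 * prebuilt header blob; "encap_hdr" and "encap_sz" are hypothetical.
 *
 *	struct mlx5_pkt_reformat_params params = {
 *		.type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
 *		.size = encap_sz,
 *		.data = encap_hdr,
 *	};
 *	struct mlx5_pkt_reformat *reformat;
 *
 *	reformat = mlx5_packet_reformat_alloc(dev, &params,
 *					      MLX5_FLOW_NAMESPACE_FDB);
 *	if (IS_ERR(reformat))
 *		return PTR_ERR(reformat);
 */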

void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, pkt_reformat->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
	kfree(pkt_reformat);
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);

int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
{
	return definer->id;
}

struct mlx5_flow_definer *
mlx5_create_match_definer(struct mlx5_core_dev *dev,
			  enum mlx5_flow_namespace_type ns_type, u16 format_id,
			  u32 *match_mask)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_definer *definer;
	int id;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	definer = kzalloc(sizeof(*definer), GFP_KERNEL);
	if (!definer)
		return ERR_PTR(-ENOMEM);

	definer->ns_type = ns_type;
	id = root->cmds->create_match_definer(root, format_id, match_mask);
	if (id < 0) {
		mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
		kfree(definer);
		return ERR_PTR(id);
	}
	definer->id = id;
	return definer;
}
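
/* Usage sketch (illustrative only): the port-selection LAG code is the
 * typical consumer; it picks a firmware-defined format id and masks the
 * fields it hashes on. "format_id" below is a made-up placeholder.
 *
 *	struct mlx5_flow_definer *definer;
 *	u32 *match_mask;
 *
 *	match_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer, match_mask),
 *			      GFP_KERNEL);
 *	if (!match_mask)
 *		return -ENOMEM;
 *	// set the bits of the fields of interest in match_mask here ...
 *	definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
 *					    format_id, match_mask);
 *	kvfree(match_mask);
 *	if (IS_ERR(definer))
 *		return PTR_ERR(definer);
 *	...
 *	mlx5_destroy_match_definer(dev, definer);
 */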

void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
				struct mlx5_flow_definer *definer)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, definer->ns_type);
	if (WARN_ON(!root))
		return;

	root->cmds->destroy_match_definer(root, definer->id);
	kfree(definer);
}

int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns)
{
	if (peer_ns && ns->mode != peer_ns->mode) {
		mlx5_core_err(ns->dev,
			      "Can't peer namespace of different steering mode\n");
		return -EINVAL;
	}

	return ns->cmds->set_peer(ns, peer_ns);
}

/* This function should be called only at init stage of the namespace.
 * It is not safe to call this function while steering operations
 * are in progress.
 */
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode)
{
	struct mlx5_flow_root_namespace *root;
	const struct mlx5_flow_cmds *cmds;
	int err;

	root = find_root(&ns->node);
	if (&root->ns != ns)
		/* Can't set cmds to non root namespace */
		return -EINVAL;

	if (root->table_type != FS_FT_FDB)
		return -EOPNOTSUPP;

	if (root->mode == mode)
		return 0;

	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
		cmds = mlx5_fs_cmd_get_dr_cmds();
	else
		cmds = mlx5_fs_cmd_get_fw_cmds();
	if (!cmds)
		return -EOPNOTSUPP;

	err = cmds->create_ns(root);
	if (err) {
		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
			      err);
		return err;
	}

	root->cmds->destroy_ns(root);
	root->cmds = cmds;
	root->mode = mode;

	return 0;
}
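
/* Usage sketch (illustrative only): a caller such as the eswitch offloads
 * setup is the expected user, flipping the FDB root between firmware (DMFS)
 * and software (SMFS) steering before any rules are installed.
 *
 *	struct mlx5_flow_namespace *ns;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	err = mlx5_flow_namespace_set_mode(ns, MLX5_FLOW_STEERING_MODE_SMFS);
 */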