#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;

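	/* when per-flow vlan push/pop is emulated, don't program those actions into the firmware */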
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan.ethtype = ntohs(attr->vlan_proto);
		flow_act.vlan.vid = attr->vlan_vid;
		flow_act.vlan.prio = attr->vlan_prio;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;

	rule = mlx5_add_flow_rules(esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

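	/* a vport with vlan push configured can't offload VF --> wire rules without it */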
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

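	/* protect against (1) setting rules with different vlans to push and
	 * (2) setting rules without a vlan (vid 0) mixed with rules that push a vlan
	 */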
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

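	/* nothing to do if the device supports vlan push/pop natively (no emulation) */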
	if (mlx5_eswitch_vlan_actions_supported(esw->dev))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
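		/* track VF --> wire rules that have no vlan push action */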
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
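		/* first vlan push/pop rule: apply the global vlan pop policy */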
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

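	/* nothing to do if the device supports vlan push/pop natively (no emulation) */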
	if (mlx5_eswitch_vlan_actions_supported(esw->dev))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
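		/* track VF --> wire rules that have no vlan push action */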
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

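	/* no more vlan push/pop rules: remove the global vlan pop policy */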
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS 4

static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

out:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

#define MAX_PF_SQ 256

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports + MAX_PF_SQ + 1;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

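	/* create the send-to-vport flow group */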
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

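	/* create the miss flow group */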
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

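	/* create the vport rx flow group */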
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}

static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

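	/* temporarily unregister the PF IB (RoCE) interface while the offloads tables are set up */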
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
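	/* register the PF IB (RoCE) interface back */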
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

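	/* register the PF IB (RoCE) interface back */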
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
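		/* fall through */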
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == FDB_UPLINK_VPORT)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);