#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

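/* Add an offloaded rule to the fast-path FDB table. The rule matches on the
 * ingress vport (misc source_port) in addition to the caller-provided spec,
 * and forwards to the destination representor's vport and/or a flow counter,
 * according to attr.
 */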
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
					   MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

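/* Sanity-check a vlan push/pop request against the source and destination
 * representors; only the combinations the FDB offload path can emulate are
 * accepted here.
 */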
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

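/* Emulate a per-flow vlan push/pop by programming the relevant vport contexts
 * and keeping refcounts; the FDB rule itself does not carry the vlan actions
 * (see the action mask applied in mlx5_eswitch_add_offloaded_rule()).
 */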
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

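/* Add a slow-path FDB rule that matches packets sent by the e-switch manager
 * (source_port 0) from the given SQ and forwards them to the representor's
 * vport, i.e. re-injects representor TX traffic towards its VF.
 */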
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is the esw manager */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}

int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}

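/* Install the FDB miss rule: an empty match that forwards any packet which
 * did not hit an offloaded rule to vport 0 (the e-switch manager), where it
 * is handled in software via the representors.
 */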
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

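/* Create the auto-grouped fast-path FDB table. Its size is bounded by both
 * the FDB log_max_ft_size capability and the number of flow counters that can
 * back ESW_OFFLOADS_NUM_GROUPS groups; tunnel encap/decap is enabled when the
 * devlink encap mode is not "none".
 */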
static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

out:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

#define MAX_PF_SQ 256

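/* Create the offloads FDB tables: the fast-path table for offloaded flows,
 * plus a slow-path table holding the send-to-vport group (one entry per
 * vport/PF SQ) and the miss group with its catch-all rule.
 */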
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports + MAX_PF_SQ + 1;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

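/* Create the single flow group of the offloads RX table; it matches only on
 * the misc source_port field, with one entry per possible vport (VFs plus
 * PF/uplink).
 */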
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

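/* Steer traffic that arrived from the given vport (matched by misc
 * source_port) to the supplied TIR, so the representor netdev receives it.
 */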
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

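/* Switch the e-switch from legacy to offloads (switchdev) mode by disabling
 * and re-enabling SRIOV in the new mode; on failure, fall back to legacy.
 * Also resolves the initial min-inline mode from the enabled vports.
 */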
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

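/* devlink callback: set the minimal TX inline mode on all enabled VF vports.
 * Only allowed when the device uses per-vport-context inline configuration
 * and no offloaded flows are installed; vports already modified are reverted
 * on failure.
 */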
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

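/* devlink callback: set the encap offload mode. Changing it while in offloads
 * mode requires re-creating the fast-path FDB table with or without
 * MLX5_FLOW_TABLE_TUNNEL_EN, which is only possible when no flows are
 * installed.
 */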
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

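/* Register a vport representor with the e-switch offloads infrastructure;
 * the rep's load/unload callbacks are invoked when switchdev mode is
 * entered/left.
 */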
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	memset(rep, 0, sizeof(*rep));

	rep->load   = __rep->load;
	rep->unload = __rep->unload;
	rep->vport  = __rep->vport;
	rep->netdev = __rep->netdev;
	ether_addr_copy(rep->hw_id, __rep->hw_id);

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(esw, rep);

	rep->valid = false;
}

struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->netdev;
}