/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
43
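/* Lookup helper for the lazily created per-(chain, prio, level) fast path
 * FDB tables; each entry caches the flow table pointer and the number of
 * rules currently referencing it.
 */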
#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define MLX5_VPORT_FIRST_VF 0x1

#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs)		\
	for ((vport) = MLX5_VPORT_FIRST_VF;			\
	     (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs);					\
	     (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)

#define UPLINK_REP_INDEX 0
59
60static struct mlx5_flow_table *
61esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
62static void
63esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
64
65bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
66{
	return !!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED);
68}
69
70u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
71{
72 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
73 return FDB_MAX_CHAIN;
74
75 return 0;
76}
77
78u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
79{
80 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
81 return FDB_MAX_PRIO;
82
83 return 1;
84}
85
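/* Build an offloaded FDB rule: translate the eswitch flow attributes into
 * flow destinations (forward vports, a chained table or a counter), add the
 * source vport match (plus owner vhca_id on a merged eswitch), then insert
 * the rule into the (chain, prio) fast path table taken by reference here.
 */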
86struct mlx5_flow_handle *
87mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
88 struct mlx5_flow_spec *spec,
89 struct mlx5_esw_flow_attr *attr)
90{
91 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
92 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
93 bool split = !!(attr->split_count);
94 struct mlx5_flow_handle *rule;
95 struct mlx5_flow_table *fdb;
96 int j, i = 0;
97 void *misc;
98
99 if (esw->mode != SRIOV_OFFLOADS)
100 return ERR_PTR(-EOPNOTSUPP);
101
102 flow_act.action = attr->action;
103
104 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
105 flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
106 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
107 else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
108 flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
109 flow_act.vlan[0].vid = attr->vlan_vid[0];
110 flow_act.vlan[0].prio = attr->vlan_prio[0];
111 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
112 flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
113 flow_act.vlan[1].vid = attr->vlan_vid[1];
114 flow_act.vlan[1].prio = attr->vlan_prio[1];
115 }
116 }
117
118 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
119 if (attr->dest_chain) {
120 struct mlx5_flow_table *ft;
121
122 ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
123 if (IS_ERR(ft)) {
124 rule = ERR_CAST(ft);
125 goto err_create_goto_table;
126 }
127
128 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
129 dest[i].ft = ft;
130 i++;
131 } else {
132 for (j = attr->split_count; j < attr->out_count; j++) {
133 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
134 dest[i].vport.num = attr->dests[j].rep->vport;
135 dest[i].vport.vhca_id =
136 MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
137 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
138 dest[i].vport.flags |=
139 MLX5_FLOW_DEST_VPORT_VHCA_ID;
140 if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
141 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
142 flow_act.reformat_id = attr->dests[j].encap_id;
143 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
144 dest[i].vport.reformat_id =
145 attr->dests[j].encap_id;
146 }
147 i++;
148 }
149 }
150 }
151 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
152 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
153 dest[i].counter_id = mlx5_fc_id(attr->counter);
154 i++;
155 }
156
157 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
158 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
159
160 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
161 MLX5_SET(fte_match_set_misc, misc,
162 source_eswitch_owner_vhca_id,
163 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
164
165 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
166 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
167 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
168 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
169 source_eswitch_owner_vhca_id);
170
171 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
172 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
173 if (attr->tunnel_match_level != MLX5_MATCH_NONE)
174 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
175 if (attr->match_level != MLX5_MATCH_NONE)
176 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
177 } else if (attr->match_level != MLX5_MATCH_NONE) {
178 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
179 }
180
181 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
182 flow_act.modify_id = attr->mod_hdr_id;
183
184 fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
185 if (IS_ERR(fdb)) {
186 rule = ERR_CAST(fdb);
187 goto err_esw_get;
188 }
189
190 if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
191 rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
192 &flow_act, dest, i);
193 else
194 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
195 if (IS_ERR(rule))
196 goto err_add_rule;
197 else
198 esw->offloads.num_flows++;
199
200 return rule;
201
202err_add_rule:
203 esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
204err_esw_get:
205 if (attr->dest_chain)
206 esw_put_prio_table(esw, attr->dest_chain, 1, 0);
207err_create_goto_table:
208 return rule;
209}
210
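/* Add the first half of a split rule: match in the level 0 table of the
 * (chain, prio) pair and forward to the first attr->split_count vport
 * destinations plus the level 1 forwarding table, where the remainder of
 * the rule's actions are applied.
 */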
211struct mlx5_flow_handle *
212mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
213 struct mlx5_flow_spec *spec,
214 struct mlx5_esw_flow_attr *attr)
215{
216 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
217 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
218 struct mlx5_flow_table *fast_fdb;
219 struct mlx5_flow_table *fwd_fdb;
220 struct mlx5_flow_handle *rule;
221 void *misc;
222 int i;
223
224 fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
225 if (IS_ERR(fast_fdb)) {
226 rule = ERR_CAST(fast_fdb);
227 goto err_get_fast;
228 }
229
230 fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
231 if (IS_ERR(fwd_fdb)) {
232 rule = ERR_CAST(fwd_fdb);
233 goto err_get_fwd;
234 }
235
236 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
237 for (i = 0; i < attr->split_count; i++) {
238 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
239 dest[i].vport.num = attr->dests[i].rep->vport;
240 dest[i].vport.vhca_id =
241 MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
242 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
243 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
244 if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
245 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
246 dest[i].vport.reformat_id = attr->dests[i].encap_id;
247 }
248 }
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;
252
253 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
254 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
255
256 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
257 MLX5_SET(fte_match_set_misc, misc,
258 source_eswitch_owner_vhca_id,
259 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
260
261 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
262 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
263 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
264 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
265 source_eswitch_owner_vhca_id);
266
267 if (attr->match_level == MLX5_MATCH_NONE)
268 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
269 else
270 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
271 MLX5_MATCH_MISC_PARAMETERS;
272
273 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
274
275 if (IS_ERR(rule))
276 goto add_err;
277
278 esw->offloads.num_flows++;
279
280 return rule;
281add_err:
282 esw_put_prio_table(esw, attr->chain, attr->prio, 1);
283err_get_fwd:
284 esw_put_prio_table(esw, attr->chain, attr->prio, 0);
285err_get_fast:
286 return rule;
287}
288
289static void
290__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
291 struct mlx5_flow_handle *rule,
292 struct mlx5_esw_flow_attr *attr,
293 bool fwd_rule)
294{
295 bool split = (attr->split_count > 0);
296 int i;
297
	mlx5_del_flow_rules(rule);

	/* unref the termination tables attached to this rule's destinations */
	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
		if (attr->dests[i].termtbl)
			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
	}
305
306 esw->offloads.num_flows--;
307
308 if (fwd_rule) {
309 esw_put_prio_table(esw, attr->chain, attr->prio, 1);
310 esw_put_prio_table(esw, attr->chain, attr->prio, 0);
311 } else {
312 esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
313 if (attr->dest_chain)
314 esw_put_prio_table(esw, attr->dest_chain, 1, 0);
315 }
316}
317
318void
319mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
320 struct mlx5_flow_handle *rule,
321 struct mlx5_esw_flow_attr *attr)
322{
323 __mlx5_eswitch_del_rule(esw, rule, attr, false);
324}
325
326void
327mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
328 struct mlx5_flow_handle *rule,
329 struct mlx5_esw_flow_attr *attr)
330{
331 __mlx5_eswitch_del_rule(esw, rule, attr, true);
332}
333
334static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
335{
336 struct mlx5_eswitch_rep *rep;
337 int vf_vport, err = 0;
338
339 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
340 for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
341 rep = &esw->offloads.vport_reps[vf_vport];
342 if (!rep->rep_if[REP_ETH].valid)
343 continue;
344
345 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
346 if (err)
347 goto out;
348 }
349
350out:
351 return err;
352}
353
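/* Pick the representor whose vport carries the emulated vlan state:
 * push is accounted on the ingress (in_rep) vport, pop on the egress
 * (first destination) vport, and plain forwarding on the ingress vport.
 */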
354static struct mlx5_eswitch_rep *
355esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
356{
357 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
358
359 in_rep = attr->in_rep;
360 out_rep = attr->dests[0].rep;
361
362 if (push)
363 vport = in_rep;
364 else if (pop)
365 vport = out_rep;
366 else
367 vport = in_rep;
368
369 return vport;
370}
371
372static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
373 bool push, bool pop, bool fwd)
374{
375 struct mlx5_eswitch_rep *in_rep, *out_rep;
376
377 if ((push || pop) && !fwd)
378 goto out_notsupp;
379
380 in_rep = attr->in_rep;
381 out_rep = attr->dests[0].rep;
382
383 if (push && in_rep->vport == FDB_UPLINK_VPORT)
384 goto out_notsupp;
385
386 if (pop && out_rep->vport == FDB_UPLINK_VPORT)
387 goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;
399
400 return 0;
401
402out_notsupp:
403 return -EOPNOTSUPP;
404}
405
406int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
407 struct mlx5_esw_flow_attr *attr)
408{
409 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
410 struct mlx5_eswitch_rep *vport = NULL;
411 bool push, pop, fwd;
412 int err = 0;
413
	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;
417
418 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
419 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
420 fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
421 !attr->dest_chain);
422
423 err = esw_add_vlan_action_check(attr, push, pop, fwd);
424 if (err)
425 return err;
426
427 attr->vlan_handled = false;
428
429 vport = esw_vlan_action_get_vport(attr, push, pop);
430
	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}
440
441 if (!push && !pop)
442 return 0;
443
	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;
451
452 if (push) {
453 if (vport->vlan_refcount)
454 goto skip_set_push;
455
456 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
457 SET_VLAN_INSERT | SET_VLAN_STRIP);
458 if (err)
459 goto out;
460 vport->vlan = attr->vlan_vid[0];
461skip_set_push:
462 vport->vlan_refcount++;
463 }
464out:
465 if (!err)
466 attr->vlan_handled = true;
467 return err;
468}
469
470int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
471 struct mlx5_esw_flow_attr *attr)
472{
473 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
474 struct mlx5_eswitch_rep *vport = NULL;
475 bool push, pop, fwd;
476 int err = 0;
477
	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;
481
482 if (!attr->vlan_handled)
483 return 0;
484
485 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
486 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
487 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
488
489 vport = esw_vlan_action_get_vport(attr, push, pop);
490
	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}
498
499 if (push) {
500 vport->vlan_refcount--;
501 if (vport->vlan_refcount)
502 goto skip_unset_push;
503
504 vport->vlan = 0;
505 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
506 0, 0, SET_VLAN_STRIP);
507 if (err)
508 goto out;
509 }
510
511skip_unset_push:
512 offloads->vlan_push_pop_refcount--;
513 if (offloads->vlan_push_pop_refcount)
514 return 0;
515
	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);
518
519out:
520 return err;
521}
522
523struct mlx5_flow_handle *
524mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
525{
526 struct mlx5_flow_act flow_act = {0};
527 struct mlx5_flow_destination dest = {};
528 struct mlx5_flow_handle *flow_rule;
529 struct mlx5_flow_spec *spec;
530 void *misc;
531
532 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
533 if (!spec) {
534 flow_rule = ERR_PTR(-ENOMEM);
535 goto out;
536 }
537
538 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
539 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
540 MLX5_SET(fte_match_set_misc, misc, source_port, 0x0);
541
542 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
543 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
544 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
545
546 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
547 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
548 dest.vport.num = vport;
549 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
550
551 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
552 &flow_act, &dest, 1);
553 if (IS_ERR(flow_rule))
554 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
555out:
556 kvfree(spec);
557 return flow_rule;
558}
559EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
560
561void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
562{
563 mlx5_del_flow_rules(rule);
564}
565
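/* Prepare the match (source vport + peer eswitch owner vhca_id) and the
 * destination (vport 0 on the peer, identified by its vhca_id) that are
 * shared by all the peer miss rules installed below.
 */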
566static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
567 struct mlx5_flow_spec *spec,
568 struct mlx5_flow_destination *dest)
569{
570 void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
571 misc_parameters);
572
573 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
574 MLX5_CAP_GEN(peer_dev, vhca_id));
575
576 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
577
578 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
579 misc_parameters);
580 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
581 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
582 source_eswitch_owner_vhca_id);
583
584 dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
585 dest->vport.num = 0;
586 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
587 dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
588}
589
590static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
591 struct mlx5_core_dev *peer_dev)
592{
593 struct mlx5_flow_destination dest = {};
594 struct mlx5_flow_act flow_act = {0};
595 struct mlx5_flow_handle **flows;
596 struct mlx5_flow_handle *flow;
597 struct mlx5_flow_spec *spec;
598
599 int nvports = esw->total_vports;
600 void *misc;
601 int err, i;
602
603 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
604 if (!spec)
605 return -ENOMEM;
606
607 peer_miss_rules_setup(peer_dev, spec, &dest);
608
609 flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
610 if (!flows) {
611 err = -ENOMEM;
612 goto alloc_flows_err;
613 }
614
615 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
616 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
617 misc_parameters);
618
619 for (i = 1; i < nvports; i++) {
620 MLX5_SET(fte_match_set_misc, misc, source_port, i);
621 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
622 spec, &flow_act, &dest, 1);
623 if (IS_ERR(flow)) {
624 err = PTR_ERR(flow);
625 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
626 goto add_flow_err;
627 }
628 flows[i] = flow;
629 }
630
631 esw->fdb_table.offloads.peer_miss_rules = flows;
632
633 kvfree(spec);
634 return 0;
635
636add_flow_err:
637 for (i--; i > 0; i--)
638 mlx5_del_flow_rules(flows[i]);
639 kvfree(flows);
640alloc_flows_err:
641 kvfree(spec);
642 return err;
643}
644
645static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
646{
647 struct mlx5_flow_handle **flows;
648 int i;
649
650 flows = esw->fdb_table.offloads.peer_miss_rules;
651
652 for (i = 1; i < esw->total_vports; i++)
653 mlx5_del_flow_rules(flows[i]);
654
655 kvfree(flows);
656}
657
658static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
659{
660 struct mlx5_flow_act flow_act = {0};
661 struct mlx5_flow_destination dest = {};
662 struct mlx5_flow_handle *flow_rule = NULL;
663 struct mlx5_flow_spec *spec;
664 void *headers_c;
665 void *headers_v;
666 int err = 0;
667 u8 *dmac_c;
668 u8 *dmac_v;
669
670 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
671 if (!spec) {
672 err = -ENOMEM;
673 goto out;
674 }
675
676 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
677 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
678 outer_headers);
679 dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
680 outer_headers.dmac_47_16);
681 dmac_c[0] = 0x01;
682
683 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
684 dest.vport.num = 0;
685 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
686
687 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
688 &flow_act, &dest, 1);
689 if (IS_ERR(flow_rule)) {
690 err = PTR_ERR(flow_rule);
691 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
692 goto out;
693 }
694
695 esw->fdb_table.offloads.miss_rule_uni = flow_rule;
696
697 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
698 outer_headers);
699 dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
700 outer_headers.dmac_47_16);
701 dmac_v[0] = 0x01;
702 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
703 &flow_act, &dest, 1);
704 if (IS_ERR(flow_rule)) {
705 err = PTR_ERR(flow_rule);
706 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
707 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
708 goto out;
709 }
710
711 esw->fdb_table.offloads.miss_rule_multi = flow_rule;
712
713out:
714 kvfree(spec);
715 return err;
716}
717
#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
 * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOL is expected to be sorted from large to small
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };
730
731static int
732get_sz_from_pool(struct mlx5_eswitch *esw)
733{
734 int sz = 0, i;
735
736 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
737 if (esw->fdb_table.offloads.fdb_left[i]) {
738 --esw->fdb_table.offloads.fdb_left[i];
739 sz = ESW_POOLS[i];
740 break;
741 }
742 }
743
744 return sz;
745}
746
747static void
748put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
749{
750 int i;
751
752 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
753 if (sz >= ESW_POOLS[i]) {
754 ++esw->fdb_table.offloads.fdb_left[i];
755 break;
756 }
757 }
758}
759
760static struct mlx5_flow_table *
761create_next_size_table(struct mlx5_eswitch *esw,
762 struct mlx5_flow_namespace *ns,
763 u16 table_prio,
764 int level,
765 u32 flags)
766{
767 struct mlx5_flow_table *fdb;
768 int sz;
769
770 sz = get_sz_from_pool(esw);
771 if (!sz)
772 return ERR_PTR(-ENOSPC);
773
774 fdb = mlx5_create_auto_grouped_flow_table(ns,
775 table_prio,
776 sz,
777 ESW_OFFLOADS_NUM_GROUPS,
778 level,
779 flags);
780 if (IS_ERR(fdb)) {
781 esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
782 (int)PTR_ERR(fdb), table_prio, level, sz);
783 put_sz_to_pool(esw, sz);
784 }
785
786 return fdb;
787}
788
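/* Get (and possibly create) the flow table for the given chain/prio/level.
 * Tables are created lazily under fdb_prio_lock, sized from the shared FDB
 * pool, and reference counted per level so esw_put_prio_table() can destroy
 * them once the last rule using them is gone.
 */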
789static struct mlx5_flow_table *
790esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
791{
792 struct mlx5_core_dev *dev = esw->dev;
793 struct mlx5_flow_table *fdb = NULL;
794 struct mlx5_flow_namespace *ns;
795 int table_prio, l = 0;
796 u32 flags = 0;
797
798 if (chain == FDB_SLOW_PATH_CHAIN)
799 return esw->fdb_table.offloads.slow_fdb;
800
801 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
802
	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}
811
812 ns = mlx5_get_fdb_sub_ns(dev, chain);
813 if (!ns) {
814 esw_warn(dev, "Failed to get FDB sub namespace\n");
815 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
816 return ERR_PTR(-EOPNOTSUPP);
817 }
818
819 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
820 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
821 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
822
	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create the requested level along with any missing lower levels,
	 * taking a reference on each level that is already open
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}
833
834 fdb = create_next_size_table(esw, ns, table_prio, l, flags);
835 if (IS_ERR(fdb)) {
836 l--;
837 goto err_create_fdb;
838 }
839
840 fdb_prio_table(esw, chain, prio, l).fdb = fdb;
841 fdb_prio_table(esw, chain, prio, l).num_rules = 1;
842 }
843
844 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
845 return fdb;
846
847err_create_fdb:
848 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
849 if (l >= 0)
850 esw_put_prio_table(esw, chain, prio, l);
851
852 return fdb;
853}
854
855static void
856esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
857{
858 int l;
859
860 if (chain == FDB_SLOW_PATH_CHAIN)
861 return;
862
863 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
864
865 for (l = level; l >= 0; l--) {
866 if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
867 continue;
868
869 put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
870 mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
871 fdb_prio_table(esw, chain, prio, l).fdb = NULL;
872 }
873
874 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
875}
876
static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}
885
886#define MAX_PF_SQ 256
887#define MAX_SQ_NVPORTS 32
888
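/* Create the slow path FDB table and its flow groups: a send-to-vport group
 * (matching source sqn/port of representor SQs), a peer eswitch miss group,
 * and a unicast/multicast miss group, then install the miss rules that
 * forward unmatched traffic to vport 0 (the uplink).
 */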
889static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
890{
891 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
892 struct mlx5_flow_table_attr ft_attr = {};
893 struct mlx5_core_dev *dev = esw->dev;
894 u32 *flow_group_in, max_flow_counter;
895 struct mlx5_flow_namespace *root_ns;
896 struct mlx5_flow_table *fdb = NULL;
897 int table_size, ix, err = 0, i;
898 struct mlx5_flow_group *g;
899 u32 flags = 0, fdb_max;
900 void *match_criteria;
901 u8 *dmac;
902
903 esw_debug(esw->dev, "Create offloads FDB Tables\n");
904 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
905 if (!flow_group_in)
906 return -ENOMEM;
907
908 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
909 if (!root_ns) {
910 esw_warn(dev, "Failed to get FDB flow namespace\n");
911 err = -EOPNOTSUPP;
912 goto ns_err;
913 }
914
915 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
916 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
917 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
918
919 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
920 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
921 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
922 fdb_max);
923
924 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
925 esw->fdb_table.offloads.fdb_left[i] =
926 ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
927
928 table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
929 esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * created are also with encap
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
937
938 ft_attr.flags = flags;
939 ft_attr.max_fte = table_size;
940 ft_attr.prio = FDB_SLOW_PATH;
941
942 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
943 if (IS_ERR(fdb)) {
944 err = PTR_ERR(fdb);
945 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
946 goto slow_fdb_err;
947 }
948 esw->fdb_table.offloads.slow_fdb = fdb;
949
	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}
961
	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
966
967 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
968
969 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
970 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
971
972 ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
973 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
974 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
975
976 g = mlx5_create_flow_group(fdb, flow_group_in);
977 if (IS_ERR(g)) {
978 err = PTR_ERR(g);
979 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
980 goto send_vport_err;
981 }
982 esw->fdb_table.offloads.send_to_vport_grp = g;
983
	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
988
989 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
990 match_criteria);
991
992 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
993 misc_parameters.source_port);
994 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
995 misc_parameters.source_eswitch_owner_vhca_id);
996
997 MLX5_SET(create_flow_group_in, flow_group_in,
998 source_eswitch_owner_vhca_id_valid, 1);
999 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1000 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1001 ix + esw->total_vports - 1);
1002 ix += esw->total_vports;
1003
1004 g = mlx5_create_flow_group(fdb, flow_group_in);
1005 if (IS_ERR(g)) {
1006 err = PTR_ERR(g);
1007 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1008 goto peer_miss_err;
1009 }
1010 esw->fdb_table.offloads.peer_miss_grp = g;
1011
	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
1016 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1017 match_criteria);
1018 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1019 outer_headers.dmac_47_16);
1020 dmac[0] = 0x01;
1021
1022 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1023 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);
1024
1025 g = mlx5_create_flow_group(fdb, flow_group_in);
1026 if (IS_ERR(g)) {
1027 err = PTR_ERR(g);
1028 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
1029 goto miss_err;
1030 }
1031 esw->fdb_table.offloads.miss_grp = g;
1032
1033 err = esw_add_fdb_miss_rule(esw);
1034 if (err)
1035 goto miss_rule_err;
1036
1037 esw->nvports = nvports;
1038 kvfree(flow_group_in);
1039 return 0;
1040
1041miss_rule_err:
1042 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1043miss_err:
1044 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1045peer_miss_err:
1046 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1047send_vport_err:
1048 esw_destroy_offloads_fast_fdb_tables(esw);
1049 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1050slow_fdb_err:
1051ns_err:
1052 kvfree(flow_group_in);
1053 return err;
1054}
1055
1056static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1057{
1058 if (!esw->fdb_table.offloads.slow_fdb)
1059 return;
1060
1061 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
1062 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1063 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1064 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1065 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1066 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1067
1068 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1069 esw_destroy_offloads_fast_fdb_tables(esw);
1070}
1071
1072static int esw_create_offloads_table(struct mlx5_eswitch *esw)
1073{
1074 struct mlx5_flow_table_attr ft_attr = {};
1075 struct mlx5_core_dev *dev = esw->dev;
1076 struct mlx5_flow_table *ft_offloads;
1077 struct mlx5_flow_namespace *ns;
1078 int err = 0;
1079
1080 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1081 if (!ns) {
1082 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1083 return -EOPNOTSUPP;
1084 }
1085
1086 ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;
1087
1088 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
1089 if (IS_ERR(ft_offloads)) {
1090 err = PTR_ERR(ft_offloads);
1091 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
1092 return err;
1093 }
1094
1095 esw->offloads.ft_offloads = ft_offloads;
1096 return 0;
1097}
1098
1099static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1100{
1101 struct mlx5_esw_offload *offloads = &esw->offloads;
1102
1103 mlx5_destroy_flow_table(offloads->ft_offloads);
1104}
1105
1106static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
1107{
1108 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1109 struct mlx5_flow_group *g;
1110 struct mlx5_priv *priv = &esw->dev->priv;
1111 u32 *flow_group_in;
1112 void *match_criteria, *misc;
1113 int err = 0;
1114 int nvports = priv->sriov.num_vfs + 2;
1115
1116 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1117 if (!flow_group_in)
1118 return -ENOMEM;
1119
	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
1124
1125 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1126 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
1127 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1128
1129 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1130 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
1131
1132 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1133
1134 if (IS_ERR(g)) {
1135 err = PTR_ERR(g);
1136 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
1137 goto out;
1138 }
1139
1140 esw->offloads.vport_rx_group = g;
1141out:
1142 kvfree(flow_group_in);
1143 return err;
1144}
1145
1146static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
1147{
1148 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
1149}
1150
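/* Rx steering rule on the offloads table: match the source vport carried in
 * the packet metadata and forward to the destination supplied by the caller.
 */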
1151struct mlx5_flow_handle *
1152mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
1153 struct mlx5_flow_destination *dest)
1154{
1155 struct mlx5_flow_act flow_act = {0};
1156 struct mlx5_flow_handle *flow_rule;
1157 struct mlx5_flow_spec *spec;
1158 void *misc;
1159
1160 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1161 if (!spec) {
1162 flow_rule = ERR_PTR(-ENOMEM);
1163 goto out;
1164 }
1165
1166 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1167 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1168
1169 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1170 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1171
1172 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1173
1174 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1175 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
1176 &flow_act, dest, 1);
1177 if (IS_ERR(flow_rule)) {
1178 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
1179 goto out;
1180 }
1181
1182out:
1183 kvfree(spec);
1184 return flow_rule;
1185}
1186
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
1188{
1189 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
1190
1191 if (esw->mode != SRIOV_LEGACY) {
1192 NL_SET_ERR_MSG_MOD(extack,
1193 "Can't set offloads mode, SRIOV legacy not enabled");
1194 return -EINVAL;
1195 }
1196
1197 mlx5_eswitch_disable_sriov(esw);
1198 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
1199 if (err) {
1200 NL_SET_ERR_MSG_MOD(extack,
1201 "Failed setting eswitch to offloads");
1202 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
1203 if (err1) {
1204 NL_SET_ERR_MSG_MOD(extack,
1205 "Failed setting eswitch back to legacy");
1206 }
1207 }
1208 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
1209 if (mlx5_eswitch_inline_mode_get(esw,
1210 num_vfs,
1211 &esw->offloads.inline_mode)) {
1212 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
1213 NL_SET_ERR_MSG_MOD(extack,
1214 "Inline mode is different between vports");
1215 }
1216 }
1217 return err;
1218}
1219
1220void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
1221{
1222 kfree(esw->offloads.vport_reps);
1223}
1224
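/* Allocate one representor slot per possible vport and stamp each with the
 * PF hardware MAC; slot 0 is reserved for the uplink representor.
 */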
1225int esw_offloads_init_reps(struct mlx5_eswitch *esw)
1226{
1227 int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
1228 struct mlx5_core_dev *dev = esw->dev;
1229 struct mlx5_esw_offload *offloads;
1230 struct mlx5_eswitch_rep *rep;
1231 u8 hw_id[ETH_ALEN];
1232 int vport;
1233
1234 esw->offloads.vport_reps = kcalloc(total_vfs,
1235 sizeof(struct mlx5_eswitch_rep),
1236 GFP_KERNEL);
1237 if (!esw->offloads.vport_reps)
1238 return -ENOMEM;
1239
1240 offloads = &esw->offloads;
1241 mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
1242
1243 for (vport = 0; vport < total_vfs; vport++) {
1244 rep = &offloads->vport_reps[vport];
1245
1246 rep->vport = vport;
1247 ether_addr_copy(rep->hw_id, hw_id);
1248 }
1249
1250 offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
1251
1252 return 0;
1253}
1254
1255static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
1256 struct mlx5_eswitch_rep *rep, u8 rep_type)
1257{
1258 if (!rep->rep_if[rep_type].valid)
1259 return;
1260
1261 rep->rep_if[rep_type].unload(rep);
1262}
1263
1264static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
1265 u8 rep_type)
1266{
1267 struct mlx5_eswitch_rep *rep;
1268 int vport;
1269
1270 for (vport = nvports; vport >= MLX5_VPORT_FIRST_VF; vport--) {
1271 rep = &esw->offloads.vport_reps[vport];
1272 __esw_offloads_unload_rep(esw, rep, rep_type);
1273 }
1274
1275 rep = &esw->offloads.vport_reps[UPLINK_REP_INDEX];
1276 __esw_offloads_unload_rep(esw, rep, rep_type);
1277}
1278
1279static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
1280{
1281 u8 rep_type = NUM_REP_TYPES;
1282
1283 while (rep_type-- > 0)
1284 esw_offloads_unload_reps_type(esw, nvports, rep_type);
1285}
1286
1287static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
1288 struct mlx5_eswitch_rep *rep, u8 rep_type)
1289{
1290 if (!rep->rep_if[rep_type].valid)
1291 return 0;
1292
1293 return rep->rep_if[rep_type].load(esw->dev, rep);
1294}
1295
1296static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
1297 u8 rep_type)
1298{
1299 struct mlx5_eswitch_rep *rep;
1300 int vport;
1301 int err;
1302
1303 rep = &esw->offloads.vport_reps[UPLINK_REP_INDEX];
1304 err = __esw_offloads_load_rep(esw, rep, rep_type);
1305 if (err)
1306 goto out;
1307
1308 for (vport = MLX5_VPORT_FIRST_VF; vport <= nvports; vport++) {
1309 rep = &esw->offloads.vport_reps[vport];
1310 err = __esw_offloads_load_rep(esw, rep, rep_type);
1311 if (err)
1312 goto err_reps;
1313 }
1314
1315 return 0;
1316
1317err_reps:
1318 esw_offloads_unload_reps_type(esw, vport, rep_type);
1319out:
1320 return err;
1321}
1322
1323static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
1324{
1325 u8 rep_type = 0;
1326 int err;
1327
1328 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
1329 err = esw_offloads_load_reps_type(esw, nvports, rep_type);
1330 if (err)
1331 goto err_reps;
1332 }
1333
1334 return err;
1335
1336err_reps:
1337 while (rep_type-- > 0)
1338 esw_offloads_unload_reps_type(esw, nvports, rep_type);
1339 return err;
1340}
1341
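/* Pairing of the two eswitches of a merged-eswitch device is driven over
 * devcom: each side installs peer miss rules for the other on PAIR and
 * removes them on UNPAIR.
 */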
1342#define ESW_OFFLOADS_DEVCOM_PAIR (0)
1343#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1344
1345static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1346 struct mlx5_eswitch *peer_esw)
1347{
1348 int err;
1349
1350 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1351 if (err)
1352 return err;
1353
1354 return 0;
1355}
1356
1357static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1358{
1359 mlx5e_tc_clean_fdb_peer_flows(esw);
1360 esw_del_fdb_peer_miss_rules(esw);
1361}
1362
1363static int mlx5_esw_offloads_devcom_event(int event,
1364 void *my_data,
1365 void *event_data)
1366{
1367 struct mlx5_eswitch *esw = my_data;
1368 struct mlx5_eswitch *peer_esw = event_data;
1369 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1370 int err;
1371
1372 switch (event) {
1373 case ESW_OFFLOADS_DEVCOM_PAIR:
1374 err = mlx5_esw_offloads_pair(esw, peer_esw);
1375 if (err)
1376 goto err_out;
1377
1378 err = mlx5_esw_offloads_pair(peer_esw, esw);
1379 if (err)
1380 goto err_pair;
1381
1382 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1383 break;
1384
1385 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1386 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1387 break;
1388
1389 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1390 mlx5_esw_offloads_unpair(peer_esw);
1391 mlx5_esw_offloads_unpair(esw);
1392 break;
1393 }
1394
1395 return 0;
1396
1397err_pair:
1398 mlx5_esw_offloads_unpair(esw);
1399
1400err_out:
1401 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1402 event, err);
1403 return err;
1404}
1405
1406static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1407{
1408 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1409
1410 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1411 mutex_init(&esw->offloads.peer_mutex);
1412
1413 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1414 return;
1415
1416 mlx5_devcom_register_component(devcom,
1417 MLX5_DEVCOM_ESW_OFFLOADS,
1418 mlx5_esw_offloads_devcom_event,
1419 esw);
1420
1421 mlx5_devcom_send_event(devcom,
1422 MLX5_DEVCOM_ESW_OFFLOADS,
1423 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1424}
1425
1426static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1427{
1428 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1429
1430 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1431 return;
1432
1433 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1434 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1435
1436 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1437}
1438
1439static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1440 struct mlx5_vport *vport)
1441{
1442 struct mlx5_core_dev *dev = esw->dev;
1443 struct mlx5_flow_act flow_act = {0};
1444 struct mlx5_flow_spec *spec;
1445 int err = 0;
1446
	/* For prio tag mode, there is only 1 FTE:
	 * 1) Untagged packets - push prio tag VLAN and allow
	 * Unmatched traffic is allowed by default
	 */
	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;
1454
1455 esw_vport_cleanup_ingress_rules(esw, vport);
1456
1457 err = esw_vport_enable_ingress_acl(esw, vport);
1458 if (err) {
1459 mlx5_core_warn(esw->dev,
1460 "failed to enable prio tag ingress acl (%d) on vport[%d]\n",
1461 err, vport->vport);
1462 return err;
1463 }
1464
1465 esw_debug(esw->dev,
1466 "vport[%d] configure ingress rules\n", vport->vport);
1467
1468 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1469 if (!spec) {
1470 err = -ENOMEM;
1471 goto out_no_mem;
1472 }
1473
	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1478 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1479 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1480 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1481 flow_act.vlan[0].vid = 0;
1482 flow_act.vlan[0].prio = 0;
1483 vport->ingress.allow_rule =
1484 mlx5_add_flow_rules(vport->ingress.acl, spec,
1485 &flow_act, NULL, 0);
1486 if (IS_ERR(vport->ingress.allow_rule)) {
1487 err = PTR_ERR(vport->ingress.allow_rule);
1488 esw_warn(esw->dev,
1489 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1490 vport->vport, err);
1491 vport->ingress.allow_rule = NULL;
1492 goto out;
1493 }
1494
1495out:
1496 kvfree(spec);
1497out_no_mem:
1498 if (err)
1499 esw_vport_cleanup_ingress_rules(esw, vport);
1500 return err;
1501}
1502
1503static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
1504 struct mlx5_vport *vport)
1505{
1506 struct mlx5_flow_act flow_act = {0};
1507 struct mlx5_flow_spec *spec;
1508 int err = 0;
1509
	/* For prio tag mode, there is only 1 FTE:
	 * 1) prio tag packets - pop the prio tag VLAN and allow
	 * Unmatched traffic is allowed by default
	 */
	esw_vport_cleanup_egress_rules(esw, vport);
1516
1517 err = esw_vport_enable_egress_acl(esw, vport);
1518 if (err) {
1519 mlx5_core_warn(esw->dev,
1520 "failed to enable egress acl (%d) on vport[%d]\n",
1521 err, vport->vport);
1522 return err;
1523 }
1524
1525 esw_debug(esw->dev,
1526 "vport[%d] configure prio tag egress rules\n", vport->vport);
1527
1528 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1529 if (!spec) {
1530 err = -ENOMEM;
1531 goto out_no_mem;
1532 }
1533
	/* prio tag vlan rule - pop it so VF receives untagged packets */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
1539
1540 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1541 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1542 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1543 vport->egress.allowed_vlan =
1544 mlx5_add_flow_rules(vport->egress.acl, spec,
1545 &flow_act, NULL, 0);
1546 if (IS_ERR(vport->egress.allowed_vlan)) {
1547 err = PTR_ERR(vport->egress.allowed_vlan);
1548 esw_warn(esw->dev,
1549 "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
1550 vport->vport, err);
1551 vport->egress.allowed_vlan = NULL;
1552 goto out;
1553 }
1554
1555out:
1556 kvfree(spec);
1557out_no_mem:
1558 if (err)
1559 esw_vport_cleanup_egress_rules(esw, vport);
1560 return err;
1561}
1562
1563static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
1564{
1565 int i, j;
1566 int err;
1567
1568 mlx5_esw_for_each_vf_vport(esw, i, nvports) {
1569 err = esw_vport_ingress_prio_tag_config(esw, &esw->vports[i]);
1570 if (err)
1571 goto err_ingress;
1572 err = esw_vport_egress_prio_tag_config(esw, &esw->vports[i]);
1573 if (err)
1574 goto err_egress;
1575 }
1576
1577 return 0;
1578
1579err_egress:
1580 esw_vport_disable_ingress_acl(esw, &esw->vports[i]);
1581err_ingress:
1582 mlx5_esw_for_each_vf_vport_reverse(esw, j, i - 1) {
1583 esw_vport_disable_egress_acl(esw, &esw->vports[j]);
1584 esw_vport_disable_ingress_acl(esw, &esw->vports[j]);
1585 }
1586
1587 return err;
1588}
1589
1590static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
1591{
1592 int i;
1593
1594 mlx5_esw_for_each_vf_vport(esw, i, esw->dev->priv.sriov.num_vfs) {
1595 esw_vport_disable_egress_acl(esw, &esw->vports[i]);
1596 esw_vport_disable_ingress_acl(esw, &esw->vports[i]);
1597 }
1598}
1599
1600static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int vf_nvports,
1601 int nvports)
1602{
1603 int err;
1604
1605 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
1606
1607 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
1608 err = esw_prio_tag_acls_config(esw, vf_nvports);
1609 if (err)
1610 return err;
1611 }
1612
1613 err = esw_create_offloads_fdb_tables(esw, nvports);
1614 if (err)
1615 return err;
1616
1617 err = esw_create_offloads_table(esw);
1618 if (err)
1619 goto create_ft_err;
1620
1621 err = esw_create_vport_rx_group(esw);
1622 if (err)
1623 goto create_fg_err;
1624
1625 return 0;
1626
1627create_fg_err:
1628 esw_destroy_offloads_table(esw);
1629
1630create_ft_err:
1631 esw_destroy_offloads_fdb_tables(esw);
1632
1633 return err;
1634}
1635
1636static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
1637{
1638 esw_destroy_vport_rx_group(esw);
1639 esw_destroy_offloads_table(esw);
1640 esw_destroy_offloads_fdb_tables(esw);
1641 if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
1642 esw_prio_tag_acls_cleanup(esw);
1643}
1644
1645int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
1646 int total_nvports)
1647{
1648 int err;
1649
1650 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
1651 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
1652 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
1653 else
1654 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
1655
1656 err = esw_offloads_steering_init(esw, vf_nvports, total_nvports);
1657 if (err)
1658 return err;
1659
1660 err = esw_offloads_load_reps(esw, vf_nvports);
1661 if (err)
1662 goto err_reps;
1663
1664 esw_offloads_devcom_init(esw);
1665 mutex_init(&esw->offloads.termtbl_mutex);
1666 return 0;
1667
1668err_reps:
1669 esw_offloads_steering_cleanup(esw);
1670 return err;
1671}
1672
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
1674{
1675 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
1676
1677 mlx5_eswitch_disable_sriov(esw);
1678 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
1679 if (err) {
1680 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
1681 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
1682 if (err1) {
1683 NL_SET_ERR_MSG_MOD(extack,
1684 "Failed setting eswitch back to offloads");
1685 }
1686 }
1687
	/* enable back PF RoCE */
	mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1690
1691 return err;
1692}
1693
1694void esw_offloads_cleanup(struct mlx5_eswitch *esw)
1695{
1696 u16 num_vfs = esw->dev->priv.sriov.num_vfs;
1697
1698 esw_offloads_devcom_cleanup(esw);
1699 esw_offloads_unload_reps(esw, num_vfs);
1700 esw_offloads_steering_cleanup(esw);
1701 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
1702}
1703
1704static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
1705{
1706 switch (mode) {
1707 case DEVLINK_ESWITCH_MODE_LEGACY:
1708 *mlx5_mode = SRIOV_LEGACY;
1709 break;
1710 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
1711 *mlx5_mode = SRIOV_OFFLOADS;
1712 break;
1713 default:
1714 return -EINVAL;
1715 }
1716
1717 return 0;
1718}
1719
1720static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
1721{
1722 switch (mlx5_mode) {
1723 case SRIOV_LEGACY:
1724 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
1725 break;
1726 case SRIOV_OFFLOADS:
1727 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
1728 break;
1729 default:
1730 return -EINVAL;
1731 }
1732
1733 return 0;
1734}
1735
1736static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
1737{
1738 switch (mode) {
1739 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
1740 *mlx5_mode = MLX5_INLINE_MODE_NONE;
1741 break;
1742 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
1743 *mlx5_mode = MLX5_INLINE_MODE_L2;
1744 break;
1745 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
1746 *mlx5_mode = MLX5_INLINE_MODE_IP;
1747 break;
1748 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
1749 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
1750 break;
1751 default:
1752 return -EINVAL;
1753 }
1754
1755 return 0;
1756}
1757
1758static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
1759{
1760 switch (mlx5_mode) {
1761 case MLX5_INLINE_MODE_NONE:
1762 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
1763 break;
1764 case MLX5_INLINE_MODE_L2:
1765 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
1766 break;
1767 case MLX5_INLINE_MODE_IP:
1768 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
1769 break;
1770 case MLX5_INLINE_MODE_TCP_UDP:
1771 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
1772 break;
1773 default:
1774 return -EINVAL;
1775 }
1776
1777 return 0;
1778}
1779
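/* Common sanity checks for the devlink eswitch callbacks: an Ethernet port,
 * eswitch manager capability and SRIOV enabled are all required.
 */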
1780static int mlx5_devlink_eswitch_check(struct devlink *devlink)
1781{
1782 struct mlx5_core_dev *dev = devlink_priv(devlink);
1783
1784 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1785 return -EOPNOTSUPP;
1786
	if (!MLX5_ESWITCH_MANAGER(dev))
1788 return -EPERM;
1789
1790 if (dev->priv.eswitch->mode == SRIOV_NONE)
1791 return -EOPNOTSUPP;
1792
1793 return 0;
1794}
1795
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
1797{
1798 struct mlx5_core_dev *dev = devlink_priv(devlink);
1799 u16 cur_mlx5_mode, mlx5_mode = 0;
1800 int err;
1801
1802 err = mlx5_devlink_eswitch_check(devlink);
1803 if (err)
1804 return err;
1805
1806 cur_mlx5_mode = dev->priv.eswitch->mode;
1807
1808 if (esw_mode_from_devlink(mode, &mlx5_mode))
1809 return -EINVAL;
1810
1811 if (cur_mlx5_mode == mlx5_mode)
1812 return 0;
1813
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
1820}
1821
1822int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
1823{
1824 struct mlx5_core_dev *dev = devlink_priv(devlink);
1825 int err;
1826
1827 err = mlx5_devlink_eswitch_check(devlink);
1828 if (err)
1829 return err;
1830
1831 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
1832}
1833
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
1835{
1836 struct mlx5_core_dev *dev = devlink_priv(devlink);
1837 struct mlx5_eswitch *esw = dev->priv.eswitch;
1838 int err, vport;
1839 u8 mlx5_mode;
1840
1841 err = mlx5_devlink_eswitch_check(devlink);
1842 if (err)
1843 return err;
1844
	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
1855 }
1856
1857 if (esw->offloads.num_flows > 0) {
1858 NL_SET_ERR_MSG_MOD(extack,
1859 "Can't set inline mode when flows are configured");
1860 return -EOPNOTSUPP;
1861 }
1862
1863 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
1864 if (err)
1865 goto out;
1866
1867 for (vport = 1; vport < esw->enabled_vports; vport++) {
1868 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
1869 if (err) {
1870 NL_SET_ERR_MSG_MOD(extack,
1871 "Failed to set min inline on vport");
1872 goto revert_inline_mode;
1873 }
1874 }
1875
1876 esw->offloads.inline_mode = mlx5_mode;
1877 return 0;
1878
1879revert_inline_mode:
1880 while (--vport > 0)
1881 mlx5_modify_nic_vport_min_inline(dev,
1882 vport,
1883 esw->offloads.inline_mode);
1884out:
1885 return err;
1886}
1887
1888int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
1889{
1890 struct mlx5_core_dev *dev = devlink_priv(devlink);
1891 struct mlx5_eswitch *esw = dev->priv.eswitch;
1892 int err;
1893
1894 err = mlx5_devlink_eswitch_check(devlink);
1895 if (err)
1896 return err;
1897
1898 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
1899}
1900
1901int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
1902{
1903 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
1904 struct mlx5_core_dev *dev = esw->dev;
1905 int vport;
1906
1907 if (!MLX5_CAP_GEN(dev, vport_group_manager))
1908 return -EOPNOTSUPP;
1909
1910 if (esw->mode == SRIOV_NONE)
1911 return -EOPNOTSUPP;
1912
1913 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
1914 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1915 mlx5_mode = MLX5_INLINE_MODE_NONE;
1916 goto out;
1917 case MLX5_CAP_INLINE_MODE_L2:
1918 mlx5_mode = MLX5_INLINE_MODE_L2;
1919 goto out;
1920 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1921 goto query_vports;
1922 }
1923
1924query_vports:
1925 for (vport = 1; vport <= nvfs; vport++) {
1926 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
1927 if (vport > 1 && prev_mlx5_mode != mlx5_mode)
1928 return -EINVAL;
1929 prev_mlx5_mode = mlx5_mode;
1930 }
1931
1932out:
1933 *mode = mlx5_mode;
1934 return 0;
1935}
1936
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
					struct netlink_ext_ack *extack)
1938{
1939 struct mlx5_core_dev *dev = devlink_priv(devlink);
1940 struct mlx5_eswitch *esw = dev->priv.eswitch;
1941 int err;
1942
1943 err = mlx5_devlink_eswitch_check(devlink);
1944 if (err)
1945 return err;
1946
1947 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
1948 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
1949 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
1950 return -EOPNOTSUPP;
1951
1952 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
1953 return -EOPNOTSUPP;
1954
1955 if (esw->mode == SRIOV_LEGACY) {
1956 esw->offloads.encap = encap;
1957 return 0;
1958 }
1959
1960 if (esw->offloads.encap == encap)
1961 return 0;
1962
1963 if (esw->offloads.num_flows > 0) {
1964 NL_SET_ERR_MSG_MOD(extack,
1965 "Can't set encapsulation when flows are configured");
1966 return -EOPNOTSUPP;
1967 }
1968
1969 esw_destroy_offloads_fdb_tables(esw);
1970
1971 esw->offloads.encap = encap;
1972
1973 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
1974
1975 if (err) {
1976 NL_SET_ERR_MSG_MOD(extack,
1977 "Failed re-creating fast FDB table");
1978 esw->offloads.encap = !encap;
1979 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
1980 }
1981
1982 return err;
1983}
1984
1985int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
1986{
1987 struct mlx5_core_dev *dev = devlink_priv(devlink);
1988 struct mlx5_eswitch *esw = dev->priv.eswitch;
1989 int err;
1990
1991 err = mlx5_devlink_eswitch_check(devlink);
1992 if (err)
1993 return err;
1994
1995 *encap = esw->offloads.encap;
1996 return 0;
1997}
1998
1999void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
2000 int vport_index,
2001 struct mlx5_eswitch_rep_if *__rep_if,
2002 u8 rep_type)
2003{
2004 struct mlx5_esw_offload *offloads = &esw->offloads;
2005 struct mlx5_eswitch_rep_if *rep_if;
2006
2007 rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
2008
2009 rep_if->load = __rep_if->load;
2010 rep_if->unload = __rep_if->unload;
2011 rep_if->get_proto_dev = __rep_if->get_proto_dev;
2012 rep_if->priv = __rep_if->priv;
2013
2014 rep_if->valid = true;
2015}
2016EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);
2017
2018void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
2019 int vport_index, u8 rep_type)
2020{
2021 struct mlx5_esw_offload *offloads = &esw->offloads;
2022 struct mlx5_eswitch_rep *rep;
2023
2024 rep = &offloads->vport_reps[vport_index];
2025
2026 if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
2027 rep->rep_if[rep_type].unload(rep);
2028
2029 rep->rep_if[rep_type].valid = false;
2030}
2031EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);
2032
2033void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
2034{
2035 struct mlx5_esw_offload *offloads = &esw->offloads;
2036 struct mlx5_eswitch_rep *rep;
2037
2038 rep = &offloads->vport_reps[UPLINK_REP_INDEX];
2039 return rep->rep_if[rep_type].priv;
2040}
2041
2042void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
2043 int vport,
2044 u8 rep_type)
2045{
2046 struct mlx5_esw_offload *offloads = &esw->offloads;
2047 struct mlx5_eswitch_rep *rep;
2048
2049 if (vport == FDB_UPLINK_VPORT)
2050 vport = UPLINK_REP_INDEX;
2051
2052 rep = &offloads->vport_reps[vport];
2053
2054 if (rep->rep_if[rep_type].valid &&
2055 rep->rep_if[rep_type].get_proto_dev)
2056 return rep->rep_if[rep_type].get_proto_dev(rep);
2057 return NULL;
2058}
2059EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
2060
2061void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2062{
2063 return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
2064}
2065EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2066
2067struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
2068 int vport)
2069{
2070 return &esw->offloads.vport_reps[vport];
2071}
2072EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
2073