#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}

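/* Fill the source-port match of an offloaded rule: either via metadata
 * register C0 (when vport match metadata is enabled) or via the misc
 * source_port/source_eswitch_owner_vhca_id fields.
 */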
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

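/* Add an offloaded flow rule to the fast-path FDB of the given chain/prio.
 * Destinations are taken from attr (forward vports, a goto chain table or
 * a flow counter); returns the rule handle or an ERR_PTR on failure.
 */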
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

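/* Add the first (fast path) part of a split rule: match in the level 0
 * table, forward to the first split_count vports and then to the level 1
 * forwarding table of the same chain/prio.
 */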
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

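/* Common teardown for offloaded and fwd rules: delete the rule, release
 * any termination tables and drop the references taken on the prio tables.
 */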
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	/* unref the term table */
	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
		if (attr->dests[i].termtbl)
			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

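/* Validate that the requested vlan push/pop combination can be offloaded
 * for the given ingress/egress representors.
 */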
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

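/* Emulate per-flow vlan push/pop by programming vport vlan stripping and
 * insertion, tracked via the global vlan_push_pop_refcount and per-rep
 * vlan_refcount.
 */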
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

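/* Enable or disable passing of metadata register C0 from the FDB to the
 * vport context, so that the source-port metadata set in the FDB can be
 * matched on the receive side.
 */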
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	if (enable)
		fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
	else
		fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
						     in, sizeof(in));
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

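/* Add slow-path miss rules for a peer (merged) eswitch: traffic that came
 * from one of the peer's vports (PF, ECPF, VFs) and missed here is sent
 * back to the peer eswitch manager vport.
 */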
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

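/* Add the default slow-path miss rules: unmatched unicast and multicast
 * traffic is forwarded to the eswitch manager vport.
 */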
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
 * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOL is expected to be sorted from large to small
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };

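/* Grab the largest flow table size that still has entries left in the
 * per-size pool accounting; put_sz_to_pool() returns a size to the pool
 * when a table is destroyed.
 */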
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
			struct mlx5_flow_namespace *ns,
			u16 table_prio,
			int level,
			u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

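/* Get (or lazily create) the flow table for a given chain/prio/level,
 * taking a reference on this level and all earlier levels.
 */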
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}

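/* Drop the references taken by esw_get_prio_table() and destroy tables
 * whose rule count reaches zero.
 */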
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}
1018
1019#define MAX_PF_SQ 256
1020#define MAX_SQ_NVPORTS 32
1021
1022static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
1023 u32 *flow_group_in)
1024{
1025 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1026 flow_group_in,
1027 match_criteria);
1028
1029 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1030 MLX5_SET(create_flow_group_in, flow_group_in,
1031 match_criteria_enable,
1032 MLX5_MATCH_MISC_PARAMETERS_2);
1033
1034 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1035 misc_parameters_2.metadata_reg_c_0);
1036 } else {
1037 MLX5_SET(create_flow_group_in, flow_group_in,
1038 match_criteria_enable,
1039 MLX5_MATCH_MISC_PARAMETERS);
1040
1041 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1042 misc_parameters.source_port);
1043 }
1044}
1045
1046static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
1047{
1048 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1049 struct mlx5_flow_table_attr ft_attr = {};
1050 struct mlx5_core_dev *dev = esw->dev;
1051 u32 *flow_group_in, max_flow_counter;
1052 struct mlx5_flow_namespace *root_ns;
1053 struct mlx5_flow_table *fdb = NULL;
1054 int table_size, ix, err = 0, i;
1055 struct mlx5_flow_group *g;
1056 u32 flags = 0, fdb_max;
1057 void *match_criteria;
1058 u8 *dmac;
1059
1060 esw_debug(esw->dev, "Create offloads FDB Tables\n");
1061 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1062 if (!flow_group_in)
1063 return -ENOMEM;
1064
1065 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
1066 if (!root_ns) {
1067 esw_warn(dev, "Failed to get FDB flow namespace\n");
1068 err = -EOPNOTSUPP;
1069 goto ns_err;
1070 }
1071 esw->fdb_table.offloads.ns = root_ns;
1072 err = mlx5_flow_namespace_set_mode(root_ns,
1073 esw->dev->priv.steering->mode);
1074 if (err) {
1075 esw_warn(dev, "Failed to set FDB namespace steering mode\n");
1076 goto ns_err;
1077 }
1078
1079 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
1080 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
1081 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
1082
1083 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
1084 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
1085 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
1086 fdb_max);
1087
1088 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
1089 esw->fdb_table.offloads.fdb_left[i] =
1090 ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
1091
1092 table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
1093 MLX5_ESW_MISS_FLOWS + esw->total_vports;
1094
1095
1096
1097
1098 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1099 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
1100 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
1101
1102 ft_attr.flags = flags;
1103 ft_attr.max_fte = table_size;
1104 ft_attr.prio = FDB_SLOW_PATH;
1105
1106 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
1107 if (IS_ERR(fdb)) {
1108 err = PTR_ERR(fdb);
1109 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
1110 goto slow_fdb_err;
1111 }
1112 esw->fdb_table.offloads.slow_fdb = fdb;
1113
1114
1115 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
1116 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
1117 esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
1118 esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
1119 esw_get_prio_table(esw, 0, 1, 0);
1120 esw_get_prio_table(esw, 0, 1, 1);
1121 } else {
1122 esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
1123 esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
1124 }
1125
1126
1127 memset(flow_group_in, 0, inlen);
1128 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1129 MLX5_MATCH_MISC_PARAMETERS);
1130
1131 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1132
1133 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
1134 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
1135
1136 ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
1137 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1138 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
1139
1140 g = mlx5_create_flow_group(fdb, flow_group_in);
1141 if (IS_ERR(g)) {
1142 err = PTR_ERR(g);
1143 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
1144 goto send_vport_err;
1145 }
1146 esw->fdb_table.offloads.send_to_vport_grp = g;
1147
1148
1149 memset(flow_group_in, 0, inlen);
1150
1151 esw_set_flow_group_source_port(esw, flow_group_in);
1152
1153 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1154 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1155 flow_group_in,
1156 match_criteria);
1157
1158 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1159 misc_parameters.source_eswitch_owner_vhca_id);
1160
1161 MLX5_SET(create_flow_group_in, flow_group_in,
1162 source_eswitch_owner_vhca_id_valid, 1);
1163 }
1164
1165 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1166 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1167 ix + esw->total_vports - 1);
1168 ix += esw->total_vports;
1169
1170 g = mlx5_create_flow_group(fdb, flow_group_in);
1171 if (IS_ERR(g)) {
1172 err = PTR_ERR(g);
1173 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1174 goto peer_miss_err;
1175 }
1176 esw->fdb_table.offloads.peer_miss_grp = g;
1177
1178
1179 memset(flow_group_in, 0, inlen);
1180 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1181 MLX5_MATCH_OUTER_HEADERS);
1182 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1183 match_criteria);
1184 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1185 outer_headers.dmac_47_16);
1186 dmac[0] = 0x01;
1187
1188 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1189 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1190 ix + MLX5_ESW_MISS_FLOWS);
1191
1192 g = mlx5_create_flow_group(fdb, flow_group_in);
1193 if (IS_ERR(g)) {
1194 err = PTR_ERR(g);
1195 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
1196 goto miss_err;
1197 }
1198 esw->fdb_table.offloads.miss_grp = g;
1199
1200 err = esw_add_fdb_miss_rule(esw);
1201 if (err)
1202 goto miss_rule_err;
1203
1204 esw->nvports = nvports;
1205 kvfree(flow_group_in);
1206 return 0;
1207
1208miss_rule_err:
1209 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1210miss_err:
1211 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1212peer_miss_err:
1213 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1214send_vport_err:
1215 esw_destroy_offloads_fast_fdb_tables(esw);
1216 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1217slow_fdb_err:
1218
1219 mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
1220ns_err:
1221 kvfree(flow_group_in);
1222 return err;
1223}
1224
1225static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1226{
1227 if (!esw->fdb_table.offloads.slow_fdb)
1228 return;
1229
1230 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
1231 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1232 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1233 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1234 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1235 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1236
1237 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1238 esw_destroy_offloads_fast_fdb_tables(esw);
1239
1240 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
1241 MLX5_FLOW_STEERING_MODE_DMFS);
1242}
1243
1244static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
1245{
1246 struct mlx5_flow_table_attr ft_attr = {};
1247 struct mlx5_core_dev *dev = esw->dev;
1248 struct mlx5_flow_table *ft_offloads;
1249 struct mlx5_flow_namespace *ns;
1250 int err = 0;
1251
1252 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1253 if (!ns) {
1254 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1255 return -EOPNOTSUPP;
1256 }
1257
1258 ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
1259
1260 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
1261 if (IS_ERR(ft_offloads)) {
1262 err = PTR_ERR(ft_offloads);
1263 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
1264 return err;
1265 }
1266
1267 esw->offloads.ft_offloads = ft_offloads;
1268 return 0;
1269}
1270
1271static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1272{
1273 struct mlx5_esw_offload *offloads = &esw->offloads;
1274
1275 mlx5_destroy_flow_table(offloads->ft_offloads);
1276}
1277
1278static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
1279{
1280 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1281 struct mlx5_flow_group *g;
1282 u32 *flow_group_in;
1283 int err = 0;
1284
1285 nvports = nvports + MLX5_ESW_MISS_FLOWS;
1286 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1287 if (!flow_group_in)
1288 return -ENOMEM;
1289
1290
1291 memset(flow_group_in, 0, inlen);
1292
1293 esw_set_flow_group_source_port(esw, flow_group_in);
1294
1295 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1296 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
1297
1298 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1299
1300 if (IS_ERR(g)) {
1301 err = PTR_ERR(g);
1302 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
1303 goto out;
1304 }
1305
1306 esw->offloads.vport_rx_group = g;
1307out:
1308 kvfree(flow_group_in);
1309 return err;
1310}
1311
1312static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
1313{
1314 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
1315}
1316
1317struct mlx5_flow_handle *
1318mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
1319 struct mlx5_flow_destination *dest)
1320{
1321 struct mlx5_flow_act flow_act = {0};
1322 struct mlx5_flow_handle *flow_rule;
1323 struct mlx5_flow_spec *spec;
1324 void *misc;
1325
1326 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1327 if (!spec) {
1328 flow_rule = ERR_PTR(-ENOMEM);
1329 goto out;
1330 }
1331
1332 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1333 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
1334 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1335 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
1336
1337 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
1338 MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
1339
1340 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1341 } else {
1342 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1343 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1344
1345 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1346 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1347
1348 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1349 }
1350
1351 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1352 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
1353 &flow_act, dest, 1);
1354 if (IS_ERR(flow_rule)) {
1355 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
1356 goto out;
1357 }
1358
1359out:
1360 kvfree(spec);
1361 return flow_rule;
1362}
1363
1364static int esw_offloads_start(struct mlx5_eswitch *esw,
1365 struct netlink_ext_ack *extack)
1366{
1367 int err, err1;
1368
1369 if (esw->mode != MLX5_ESWITCH_LEGACY &&
1370 !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1371 NL_SET_ERR_MSG_MOD(extack,
1372 "Can't set offloads mode, SRIOV legacy not enabled");
1373 return -EINVAL;
1374 }
1375
1376 mlx5_eswitch_disable(esw, false);
1377 mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
1378 err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
1379 if (err) {
1380 NL_SET_ERR_MSG_MOD(extack,
1381 "Failed setting eswitch to offloads");
1382 err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
1383 if (err1) {
1384 NL_SET_ERR_MSG_MOD(extack,
1385 "Failed setting eswitch back to legacy");
1386 }
1387 }
1388 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
1389 if (mlx5_eswitch_inline_mode_get(esw,
1390 &esw->offloads.inline_mode)) {
1391 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
1392 NL_SET_ERR_MSG_MOD(extack,
1393 "Inline mode is different between vports");
1394 }
1395 }
1396 return err;
1397}
1398
1399void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
1400{
1401 kfree(esw->offloads.vport_reps);
1402}
1403
1404int esw_offloads_init_reps(struct mlx5_eswitch *esw)
1405{
1406 int total_vports = esw->total_vports;
1407 struct mlx5_eswitch_rep *rep;
1408 int vport_index;
1409 u8 rep_type;
1410
1411 esw->offloads.vport_reps = kcalloc(total_vports,
1412 sizeof(struct mlx5_eswitch_rep),
1413 GFP_KERNEL);
1414 if (!esw->offloads.vport_reps)
1415 return -ENOMEM;
1416
1417 mlx5_esw_for_all_reps(esw, vport_index, rep) {
1418 rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
1419 rep->vport_index = vport_index;
1420
1421 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
1422 atomic_set(&rep->rep_data[rep_type].state,
1423 REP_UNREGISTERED);
1424 }
1425
1426 return 0;
1427}
1428
1429static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
1430 struct mlx5_eswitch_rep *rep, u8 rep_type)
1431{
1432 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1433 REP_LOADED, REP_REGISTERED) == REP_LOADED)
1434 esw->offloads.rep_ops[rep_type]->unload(rep);
1435}
1436
1437static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
1438{
1439 struct mlx5_eswitch_rep *rep;
1440
1441 if (mlx5_ecpf_vport_exists(esw->dev)) {
1442 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1443 __esw_offloads_unload_rep(esw, rep, rep_type);
1444 }
1445
1446 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1447 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1448 __esw_offloads_unload_rep(esw, rep, rep_type);
1449 }
1450
1451 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1452 __esw_offloads_unload_rep(esw, rep, rep_type);
1453}
1454
1455static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
1456 u8 rep_type)
1457{
1458 struct mlx5_eswitch_rep *rep;
1459 int i;
1460
1461 mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
1462 __esw_offloads_unload_rep(esw, rep, rep_type);
1463}
1464
1465static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
1466{
1467 u8 rep_type = NUM_REP_TYPES;
1468
1469 while (rep_type-- > 0)
1470 __unload_reps_vf_vport(esw, nvports, rep_type);
1471}
1472
1473static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
1474{
1475 __unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
1476
1477
1478 __unload_reps_special_vport(esw, rep_type);
1479}
1480
1481static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
1482{
1483 u8 rep_type = NUM_REP_TYPES;
1484
1485 while (rep_type-- > 0)
1486 __unload_reps_all_vport(esw, rep_type);
1487}
1488
1489static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
1490 struct mlx5_eswitch_rep *rep, u8 rep_type)
1491{
1492 int err = 0;
1493
1494 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1495 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
1496 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
1497 if (err)
1498 atomic_set(&rep->rep_data[rep_type].state,
1499 REP_REGISTERED);
1500 }
1501
1502 return err;
1503}
1504
1505static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
1506{
1507 struct mlx5_eswitch_rep *rep;
1508 int err;
1509
1510 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1511 err = __esw_offloads_load_rep(esw, rep, rep_type);
1512 if (err)
1513 return err;
1514
1515 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1516 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1517 err = __esw_offloads_load_rep(esw, rep, rep_type);
1518 if (err)
1519 goto err_pf;
1520 }
1521
1522 if (mlx5_ecpf_vport_exists(esw->dev)) {
1523 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1524 err = __esw_offloads_load_rep(esw, rep, rep_type);
1525 if (err)
1526 goto err_ecpf;
1527 }
1528
1529 return 0;
1530
1531err_ecpf:
1532 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1533 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1534 __esw_offloads_unload_rep(esw, rep, rep_type);
1535 }
1536
1537err_pf:
1538 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1539 __esw_offloads_unload_rep(esw, rep, rep_type);
1540 return err;
1541}
1542
1543static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
1544 u8 rep_type)
1545{
1546 struct mlx5_eswitch_rep *rep;
1547 int err, i;
1548
1549 mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
1550 err = __esw_offloads_load_rep(esw, rep, rep_type);
1551 if (err)
1552 goto err_vf;
1553 }
1554
1555 return 0;
1556
1557err_vf:
1558 __unload_reps_vf_vport(esw, --i, rep_type);
1559 return err;
1560}
1561
1562static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
1563{
1564 int err;
1565
1566
1567 err = __load_reps_special_vport(esw, rep_type);
1568 if (err)
1569 return err;
1570
1571 err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
1572 if (err)
1573 goto err_vfs;
1574
1575 return 0;
1576
1577err_vfs:
1578 __unload_reps_special_vport(esw, rep_type);
1579 return err;
1580}
1581
1582static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
1583{
1584 u8 rep_type = 0;
1585 int err;
1586
1587 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
1588 err = __load_reps_vf_vport(esw, nvports, rep_type);
1589 if (err)
1590 goto err_reps;
1591 }
1592
1593 return err;
1594
1595err_reps:
1596 while (rep_type-- > 0)
1597 __unload_reps_vf_vport(esw, nvports, rep_type);
1598 return err;
1599}
1600
1601static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
1602{
1603 u8 rep_type = 0;
1604 int err;
1605
1606 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
1607 err = __load_reps_all_vport(esw, rep_type);
1608 if (err)
1609 goto err_reps;
1610 }
1611
1612 return err;
1613
1614err_reps:
1615 while (rep_type-- > 0)
1616 __unload_reps_all_vport(esw, rep_type);
1617 return err;
1618}
1619
1620#define ESW_OFFLOADS_DEVCOM_PAIR (0)
1621#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1622
1623static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1624 struct mlx5_eswitch *peer_esw)
1625{
1626 int err;
1627
1628 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1629 if (err)
1630 return err;
1631
1632 return 0;
1633}
1634
1635static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1636{
1637 mlx5e_tc_clean_fdb_peer_flows(esw);
1638 esw_del_fdb_peer_miss_rules(esw);
1639}
1640
1641static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
1642 struct mlx5_eswitch *peer_esw,
1643 bool pair)
1644{
1645 struct mlx5_flow_root_namespace *peer_ns;
1646 struct mlx5_flow_root_namespace *ns;
1647 int err;
1648
1649 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
1650 ns = esw->dev->priv.steering->fdb_root_ns;
1651
1652 if (pair) {
1653 err = mlx5_flow_namespace_set_peer(ns, peer_ns);
1654 if (err)
1655 return err;
1656
1657 err = mlx5_flow_namespace_set_peer(peer_ns, ns);
1658 if (err) {
1659 mlx5_flow_namespace_set_peer(ns, NULL);
1660 return err;
1661 }
1662 } else {
1663 mlx5_flow_namespace_set_peer(ns, NULL);
1664 mlx5_flow_namespace_set_peer(peer_ns, NULL);
1665 }
1666
1667 return 0;
1668}
1669
1670static int mlx5_esw_offloads_devcom_event(int event,
1671 void *my_data,
1672 void *event_data)
1673{
1674 struct mlx5_eswitch *esw = my_data;
1675 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1676 struct mlx5_eswitch *peer_esw = event_data;
1677 int err;
1678
1679 switch (event) {
1680 case ESW_OFFLOADS_DEVCOM_PAIR:
1681 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
1682 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
1683 break;
1684
1685 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
1686 if (err)
1687 goto err_out;
1688 err = mlx5_esw_offloads_pair(esw, peer_esw);
1689 if (err)
1690 goto err_peer;
1691
1692 err = mlx5_esw_offloads_pair(peer_esw, esw);
1693 if (err)
1694 goto err_pair;
1695
1696 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1697 break;
1698
1699 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1700 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1701 break;
1702
1703 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1704 mlx5_esw_offloads_unpair(peer_esw);
1705 mlx5_esw_offloads_unpair(esw);
1706 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
1707 break;
1708 }
1709
1710 return 0;
1711
1712err_pair:
1713 mlx5_esw_offloads_unpair(esw);
1714err_peer:
1715 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
1716err_out:
1717 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1718 event, err);
1719 return err;
1720}
1721
1722static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1723{
1724 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1725
1726 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1727 mutex_init(&esw->offloads.peer_mutex);
1728
1729 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1730 return;
1731
1732 mlx5_devcom_register_component(devcom,
1733 MLX5_DEVCOM_ESW_OFFLOADS,
1734 mlx5_esw_offloads_devcom_event,
1735 esw);
1736
1737 mlx5_devcom_send_event(devcom,
1738 MLX5_DEVCOM_ESW_OFFLOADS,
1739 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1740}
1741
1742static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1743{
1744 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1745
1746 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1747 return;
1748
1749 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1750 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1751
1752 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1753}
1754
1755static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1756 struct mlx5_vport *vport)
1757{
1758 struct mlx5_flow_act flow_act = {0};
1759 struct mlx5_flow_spec *spec;
1760 int err = 0;
1761
1762
1763
1764
1765
1766
1767
1768 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1769 if (!spec) {
1770 err = -ENOMEM;
1771 goto out_no_mem;
1772 }
1773
1774
1775 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1776 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1777 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1778 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1779 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1780 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1781 flow_act.vlan[0].vid = 0;
1782 flow_act.vlan[0].prio = 0;
1783
1784 if (vport->ingress.modify_metadata_rule) {
1785 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1786 flow_act.modify_hdr = vport->ingress.modify_metadata;
1787 }
1788
1789 vport->ingress.allow_rule =
1790 mlx5_add_flow_rules(vport->ingress.acl, spec,
1791 &flow_act, NULL, 0);
1792 if (IS_ERR(vport->ingress.allow_rule)) {
1793 err = PTR_ERR(vport->ingress.allow_rule);
1794 esw_warn(esw->dev,
1795 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1796 vport->vport, err);
1797 vport->ingress.allow_rule = NULL;
1798 goto out;
1799 }
1800
1801out:
1802 kvfree(spec);
1803out_no_mem:
1804 if (err)
1805 esw_vport_cleanup_ingress_rules(esw, vport);
1806 return err;
1807}
1808
1809static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1810 struct mlx5_vport *vport)
1811{
1812 u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
1813 static const struct mlx5_flow_spec spec = {};
1814 struct mlx5_flow_act flow_act = {};
1815 int err = 0;
1816
1817 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
1818 MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
1819 MLX5_SET(set_action_in, action, data,
1820 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
1821
1822 vport->ingress.modify_metadata =
1823 mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1824 1, action);
1825 if (IS_ERR(vport->ingress.modify_metadata)) {
1826 err = PTR_ERR(vport->ingress.modify_metadata);
1827 esw_warn(esw->dev,
1828 "failed to alloc modify header for vport %d ingress acl (%d)\n",
1829 vport->vport, err);
1830 return err;
1831 }
1832
1833 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1834 flow_act.modify_hdr = vport->ingress.modify_metadata;
1835 vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
1836 &spec, &flow_act, NULL, 0);
1837 if (IS_ERR(vport->ingress.modify_metadata_rule)) {
1838 err = PTR_ERR(vport->ingress.modify_metadata_rule);
1839 esw_warn(esw->dev,
1840 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
1841 vport->vport, err);
1842 vport->ingress.modify_metadata_rule = NULL;
1843 goto out;
1844 }
1845
1846out:
1847 if (err)
1848 mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
1849 return err;
1850}
1851
1852void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1853 struct mlx5_vport *vport)
1854{
1855 if (vport->ingress.modify_metadata_rule) {
1856 mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
1857 mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
1858
1859 vport->ingress.modify_metadata_rule = NULL;
1860 }
1861}
1862
1863static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
1864 struct mlx5_vport *vport)
1865{
1866 struct mlx5_flow_act flow_act = {0};
1867 struct mlx5_flow_spec *spec;
1868 int err = 0;
1869
1870 if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
1871 return 0;
1872
1873
1874
1875
1876
1877
1878 esw_vport_cleanup_egress_rules(esw, vport);
1879
1880 err = esw_vport_enable_egress_acl(esw, vport);
1881 if (err) {
1882 mlx5_core_warn(esw->dev,
1883 "failed to enable egress acl (%d) on vport[%d]\n",
1884 err, vport->vport);
1885 return err;
1886 }
1887
1888 esw_debug(esw->dev,
1889 "vport[%d] configure prio tag egress rules\n", vport->vport);
1890
1891 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1892 if (!spec) {
1893 err = -ENOMEM;
1894 goto out_no_mem;
1895 }
1896
1897
1898 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1899 MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1900 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1901 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
1902
1903 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1904 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1905 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1906 vport->egress.allowed_vlan =
1907 mlx5_add_flow_rules(vport->egress.acl, spec,
1908 &flow_act, NULL, 0);
1909 if (IS_ERR(vport->egress.allowed_vlan)) {
1910 err = PTR_ERR(vport->egress.allowed_vlan);
1911 esw_warn(esw->dev,
1912 "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
1913 vport->vport, err);
1914 vport->egress.allowed_vlan = NULL;
1915 goto out;
1916 }
1917
1918out:
1919 kvfree(spec);
1920out_no_mem:
1921 if (err)
1922 esw_vport_cleanup_egress_rules(esw, vport);
1923 return err;
1924}
1925
1926static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
1927 struct mlx5_vport *vport)
1928{
1929 int err;
1930
1931 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
1932 !MLX5_CAP_GEN(esw->dev, prio_tag_required))
1933 return 0;
1934
1935 esw_vport_cleanup_ingress_rules(esw, vport);
1936
1937 err = esw_vport_enable_ingress_acl(esw, vport);
1938 if (err) {
1939 esw_warn(esw->dev,
1940 "failed to enable ingress acl (%d) on vport[%d]\n",
1941 err, vport->vport);
1942 return err;
1943 }
1944
1945 esw_debug(esw->dev,
1946 "vport[%d] configure ingress rules\n", vport->vport);
1947
1948 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1949 err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
1950 if (err)
1951 goto out;
1952 }
1953
1954 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
1955 mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
1956 err = esw_vport_ingress_prio_tag_config(esw, vport);
1957 if (err)
1958 goto out;
1959 }
1960
1961out:
1962 if (err)
1963 esw_vport_disable_ingress_acl(esw, vport);
1964 return err;
1965}
1966
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

static bool
esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
{
	return mlx5_core_mp_enabled(esw->dev);
}

static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
{
	return esw_check_vport_match_metadata_mandatory(esw) &&
	       esw_check_vport_match_metadata_supported(esw);
}

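/* Create the per-vport ingress/egress ACL tables used in offloads mode and,
 * when vport metadata matching is both mandated and supported, switch source
 * matching over to metadata.
 */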
static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i, j;
	int err;

	if (esw_use_vport_metadata(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	mlx5_esw_for_all_vports(esw, i, vport) {
		err = esw_vport_ingress_common_config(esw, vport);
		if (err)
			goto err_ingress;

		if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
			err = esw_vport_egress_prio_tag_config(esw, vport);
			if (err)
				goto err_egress;
		}
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");

	return 0;

err_egress:
	esw_vport_disable_ingress_acl(esw, vport);
err_ingress:
	for (j = MLX5_VPORT_PF; j < i; j++) {
		vport = &esw->vports[j];
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}

	return err;
}

static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i;

	mlx5_esw_for_all_vports(esw, i, vport) {
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}

	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}

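/* Create all offloads steering objects: vport ACL tables, the fast-path FDB
 * tables, the offloads (slow path) table and the vport RX group. The error
 * path unwinds them in reverse order.
 */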
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int num_vfs = esw->esw_funcs.num_vfs;
	int total_vports;
	int err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		total_vports = esw->total_vports;
	else
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_create_offloads_acl_tables(esw);
	if (err)
		return err;

	err = esw_create_offloads_fdb_tables(esw, total_vports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
	esw_destroy_offloads_acl_tables(esw);

	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_offloads_acl_tables(esw);
}

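/* Handle a change in the number of host VFs reported by the firmware:
 * unload VF representors when the VFs go away, load them when VFs appear.
 * The update is skipped while the host PF is disabled.
 */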
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = esw_offloads_load_vf_reps(esw, new_num_vfs);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

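/* ESW_FUNCTIONS_CHANGED notifier. It may be invoked from EQ (atomic)
 * context, hence the GFP_ATOMIC allocation and the deferral of the actual
 * handling to the eswitch work queue.
 */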
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

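/* Enter offloads (switchdev) mode: choose the encap mode from FDB
 * capabilities, build the steering objects, enable source-port metadata
 * passing, enable PF/VF vports and load all representors.
 */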
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mlx5_rdma_enable_roce(esw->dev);
	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);

	err = esw_offloads_load_all_reps(esw);
	if (err)
		goto err_reps;

	esw_offloads_devcom_init(esw);
	mutex_init(&esw->offloads.termtbl_mutex);

	return 0;

err_reps:
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mlx5_rdma_disable_roce(esw->dev);
	return err;
}

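/* Switch back to legacy mode; if that fails, try to restore offloads mode so
 * the eswitch is not left disabled.
 */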
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable(esw, false);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_all_reps(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

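/* Common checks for the devlink eswitch callbacks: Ethernet port type,
 * eswitch manager capability and an initialized eswitch (or an ECPF
 * eswitch manager).
 */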
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

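/* Set the minimum WQE inline mode on all host-function vports. Only
 * supported when inline mode is controlled per vport context, and refused
 * while offloaded flows are installed; partial changes are rolled back.
 */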
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport, num_vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

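/* Report the effective minimum inline mode. When inline mode is per vport
 * context, all host-function vports must agree on the same value.
 */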
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

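/* Enable or disable basic encap/decap offload. In offloads mode this
 * requires recreating the fast-path FDB tables, so it is refused while
 * offloaded flows exist.
 */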
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

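/* Register representor ops for one rep type (e.g. Ethernet or IB) on all
 * vport reps; the reps are loaded once the eswitch is in offloads mode.
 */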
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		rep_data = &rep->rep_data[rep_type];
		atomic_set(&rep_data->state, REP_REGISTERED);
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

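/* Metadata value programmed into reg_c_0 for source matching: the eswitch
 * owner vhca_id in the upper 16 bits and the vport number in the lower 16.
 */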
u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
