#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

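/* Vport UC/MC hash node */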
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u16                    vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFS L2 table */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};
63
64static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
65static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
66
67static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
68{
69 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
70 return -EOPNOTSUPP;
71
72 if (!MLX5_ESWITCH_MANAGER(dev))
73 return -EOPNOTSUPP;
74
75 return 0;
76}
77
78struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
79{
80 struct mlx5_core_dev *dev = devlink_priv(devlink);
81 int err;
82
83 err = mlx5_eswitch_check(dev);
84 if (err)
85 return ERR_PTR(err);
86
87 return dev->priv.eswitch;
88}
89
90struct mlx5_vport *__must_check
91mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
92{
93 u16 idx;
94
95 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
96 return ERR_PTR(-EPERM);
97
98 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
99
100 if (idx > esw->total_vports - 1) {
101 esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
102 vport_num, idx);
103 return ERR_PTR(-EINVAL);
104 }
105
106 return &esw->vports[idx];
107}
108
109static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
110 u32 events_mask)
111{
112 u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
113 void *nic_vport_ctx;
114
115 MLX5_SET(modify_nic_vport_context_in, in,
116 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
117 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
118 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
119 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
120 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
121 in, nic_vport_context);
122
123 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
124
125 if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
126 MLX5_SET(nic_vport_context, nic_vport_ctx,
127 event_on_uc_address_change, 1);
128 if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
129 MLX5_SET(nic_vport_context, nic_vport_ctx,
130 event_on_mc_address_change, 1);
131 if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
132 MLX5_SET(nic_vport_context, nic_vport_ctx,
133 event_on_promisc_change, 1);
134
135 return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
136}
137
138
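/* E-Switch vport context HW commands */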
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in)
{
	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
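		/* insert only if no vlan in packet */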
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}

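/* E-Switch FDB */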
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};

static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	int err;

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

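	/* num FTE 2, num FG 2 */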
	ft_attr.prio = LEGACY_VEPA_PRIO;
	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 2;
	fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
		return err;
	}
	esw->fdb_table.legacy.vepa_fdb = fdb;

	return 0;
}

static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
	ft_attr.max_fte = table_size;
	ft_attr.prio = LEGACY_FDB_PRIO;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.legacy.fdb = fdb;

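	/* Addresses group : Full match unicast/multicast addresses */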
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

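	/* Allmulti group : One rule that forwards any mcast traffic */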
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

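	/* Promiscuous group :
	 * One rule that forwards all unmatched traffic from previous groups
	 */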
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err)
		esw_destroy_legacy_fdb_table(esw);

	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy VEPA Table\n");
	if (!esw->fdb_table.legacy.vepa_fdb)
		return;

	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
	esw->fdb_table.legacy.vepa_fdb = NULL;
}

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy FDB Table\n");
	if (!esw->fdb_table.legacy.fdb)
		return;

	if (esw->fdb_table.legacy.promisc_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	if (esw->fdb_table.legacy.allmulti_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	if (esw->fdb_table.legacy.addr_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);

	esw->fdb_table.legacy.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
}

static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
	int err;

	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));

	err = esw_create_legacy_vepa_table(esw);
	if (err)
		return err;

	err = esw_create_legacy_fdb_table(esw);
	if (err)
		esw_destroy_legacy_vepa_table(esw);

	return err;
}

static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}

#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
					MLX5_VPORT_MC_ADDR_CHANGE | \
					MLX5_VPORT_PROMISC_CHANGE)

static int esw_legacy_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int ret, i;

	ret = esw_create_legacy_table(esw);
	if (ret)
		return ret;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;

	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
	if (ret)
		esw_destroy_legacy_table(esw);
	return ret;
}

static void esw_legacy_disable(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;

	mlx5_eswitch_disable_pf_vf_vports(esw);

	mc_promisc = &esw->mc_promisc;
	if (mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	esw_destroy_legacy_table(esw);
}

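/* E-Switch vport UC/MC lists management */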
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

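	/* Skip mlx5_mpfs_add_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */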
	if (mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
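	/* SRIOV is enabled: Forward UC MAC to vport */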
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

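	/* Skip mlx5_mpfs_del_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */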
	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}

static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	u16 i, vport_num;

	mlx5_esw_for_all_vports(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_num);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule =
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

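	/* Add this multicast mac to all the mc promiscuous vports */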
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
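	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */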
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

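	/* Forward MC MAC to vport */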
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

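	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */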
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

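	/* Remove this multicast mac from all the mc promiscuous vports */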
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

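/* Apply vport UC/MC list to HW l2 table and FDB table */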
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

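/* Sync vport UC/MC list from vport context */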
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport->vport, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
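			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, its now converted to be original
			 * vport mac.
			 */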
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

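/* Sync vport multicast list with the eswitch mc promiscuous (allmulti) table.
 * Must be called after esw_update_vport_addr_list().
 */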
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

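/* Apply vport rx mode to HW FDB table */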
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								MLX5_VPORT_UPLINK);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

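/* Sync vport rx mode from vport context */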
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport->vport,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport->vport, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport, promisc_all,
				(promisc_all || promisc_mc));
}

static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

static bool element_type_supported(struct mlx5_eswitch *esw, int type)
{
	const struct mlx5_core_dev *dev = esw->dev;

	switch (type) {
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_TASR;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
	}
	return false;
}

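/* E-Switch vport QoS: create the root transmit scheduling arbiter (TSAR) */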
static void esw_create_tsar(struct mlx5_eswitch *esw)
{
	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	__be32 *attr;
	int err;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return;

	if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
		return;

	if (esw->qos.enabled)
		return;

	MLX5_SET(scheduling_context, tsar_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);

	attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
	*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 tsar_ctx,
						 &esw->qos.root_tsar_id);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
		return;
	}

	esw->qos.enabled = true;
}

static void esw_destroy_tsar(struct mlx5_eswitch *esw)
{
	int err;

	if (!esw->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  esw->qos.root_tsar_id);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);

	esw->qos.enabled = false;
}

static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				u32 initial_max_rate, u32 initial_bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	int err = 0;

	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
	    !MLX5_CAP_QOS(dev, esw_scheduling))
		return 0;

	if (vport->qos.enabled)
		return -EEXIST;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 initial_max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 &vport->qos.esw_tsar_ix);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);
		return err;
	}

	vport->qos.enabled = true;
	return 0;
}

static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	int err;

	if (!vport->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  vport->qos.esw_tsar_ix);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);

	vport->qos.enabled = false;
}

static int esw_vport_qos_config(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				u32 max_rate, u32 bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	u32 bitmask = 0;
	int err = 0;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return -EOPNOTSUPP;

	if (!vport->qos.enabled)
		return -EIO;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;

	err = mlx5_modify_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 vport->qos.esw_tsar_ix,
						 bitmask);
	if (err) {
		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);
		return err;
	}

	return 0;
}

int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
			       u32 rate_mbps)
{
	u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);

	return mlx5_modify_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  ctx,
						  vport->qos.esw_tsar_ix,
						  MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
}

static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}

static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int ret;

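	/* Only non manager vports need ACL in legacy mode */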
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return 0;

	ret = esw_acl_ingress_lgcy_setup(esw, vport);
	if (ret)
		goto ingress_err;

	ret = esw_acl_egress_lgcy_setup(esw, vport);
	if (ret)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
	return ret;
}

static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		return esw_vport_create_legacy_acl_tables(esw, vport);
	else
		return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return;

	esw_acl_egress_lgcy_cleanup(esw, vport);
	esw_acl_ingress_lgcy_cleanup(esw, vport);
}

static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_vport_destroy_legacy_acl_tables(esw, vport);
	else
		esw_vport_destroy_offloads_acl_tables(esw, vport);
}

static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;
	int flags;
	int err;

	err = esw_vport_setup_acl(esw, vport);
	if (err)
		return err;

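	/* Attach vport to the eswitch rate limiter */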
	esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share);

	if (mlx5_esw_is_manager_vport(esw, vport_num))
		return 0;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      vport_num, 1,
				      vport->info.link_state);

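	/* Host PF has its own mac/guid. */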
	if (vport_num) {
		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
						  vport->info.mac);
		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
						vport->info.node_guid);
	}

	flags = (vport->info.vlan || vport->info.qos) ?
		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
			       vport->info.qos, flags);

	return 0;
}

static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	if (!mlx5_esw_is_manager_vport(esw, vport_num))
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport_num, 1,
					      MLX5_VPORT_ADMIN_STATE_DOWN);

	esw_vport_disable_qos(esw, vport);
	esw_vport_cleanup_acl(esw, vport);
}

static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int ret;

	vport = mlx5_eswitch_get_vport(esw, vport_num);

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	ret = esw_vport_setup(esw, vport);
	if (ret)
		goto done;

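	/* Sync with current vport context */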
	vport->enabled_events = enabled_events;
	vport->enabled = true;

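	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as
	 * well in smartNIC as it's a vport group manager.
	 */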
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
done:
	mutex_unlock(&esw->state_lock);
	return ret;
}

static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto done;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);

	vport->enabled = false;

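	/* Disable events from this vport */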
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);

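	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */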
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_vport_cleanup(esw, vport);
	esw->enabled_vports--;

done:
	mutex_unlock(&esw->state_lock);
}

static int eswitch_vport_event(struct notifier_block *nb,
			       unsigned long type, void *data)
{
	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
	struct mlx5_eqe *eqe = data;
	struct mlx5_vport *vport;
	u16 vport_num;

	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (!IS_ERR(vport))
		queue_work(esw->work_queue, &vport->vport_change_handler);
	return NOTIFY_OK;
}

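/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev:	Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates and returns functions changed
 * raw output memory pointer from device on success. Otherwise, error is
 * returned and output memory is freed.
 */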
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out);
	if (!err)
		return out;

	kvfree(out);
	return ERR_PTR(err);
}

static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
{
	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
	mlx5_eq_notifier_register(esw->dev, &esw->nb);

	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
			     ESW_FUNCTIONS_CHANGED);
		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
	}
}

static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
{
	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);

	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

	flush_workqueue(esw->work_queue);
}

static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		memset(&vport->info, 0, sizeof(vport->info));
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	}
}

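/* Public E-Switch API */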
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events)
{
	int err;

	err = esw_enable_vport(esw, vport_num, enabled_events);
	if (err)
		return err;

	err = esw_offloads_load_rep(esw, vport_num);
	if (err)
		goto err_rep;

	return err;

err_rep:
	esw_disable_vport(esw, vport_num);
	return err;
}

void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	esw_offloads_unload_rep(esw, vport_num);
	esw_disable_vport(esw, vport_num);
}

void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
	int i;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
		mlx5_eswitch_unload_vport(esw, i);
}

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events)
{
	int err;
	int i;

	mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
		err = mlx5_eswitch_load_vport(esw, i, enabled_events);
		if (err)
			goto vf_err;
	}

	return 0;

vf_err:
	mlx5_eswitch_unload_vf_vports(esw, i - 1);
	return err;
}

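/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
 * whichever are present on the eswitch.
 */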
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events)
{
	int ret;

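	/* Enable PF vport */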
	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
	if (ret)
		return ret;

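	/* Enable ECPF vport */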
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		if (ret)
			goto ecpf_err;
	}

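	/* Enable VF vports */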
	ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
					  enabled_events);
	if (ret)
		goto vf_err;
	return 0;

vf_err:
	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);

ecpf_err:
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
	return ret;
}

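/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
 * whichever are previously enabled on the eswitch.
 */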
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);

	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}

static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
						 &val);
	if (!err) {
		esw->params.large_group_num = val.vu32;
	} else {
		esw_warn(esw->dev,
			 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
			 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
		esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	}
}

static void
mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
	const u32 *out;

	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);

	if (num_vfs < 0)
		return;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw->esw_funcs.num_vfs = num_vfs;
		return;
	}

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		return;

	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_num_of_vfs);
	kvfree(out);
}

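/**
 * mlx5_eswitch_enable_locked - Enable eswitch
 * @esw:	Pointer to eswitch
 * @mode:	Eswitch mode to enable
 * @num_vfs:	Enable eswitch for given number of VFs. Caller should pass
 *		num_vfs > 0 to set up VF vports, 0 for none, or a negative
 *		value to keep the current esw_funcs.num_vfs unchanged.
 *
 * Enables eswitch in either legacy or offloads mode. Must be called with
 * esw->mode_lock held. Returns 0 on success or an error code on failure.
 */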
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
{
	int err;

	lockdep_assert_held(&esw->mode_lock);

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
		return -EOPNOTSUPP;
	}

	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "ingress ACL is not supported by FW\n");

	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "egress ACL is not supported by FW\n");

	mlx5_eswitch_get_devlink_param(esw);

	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);

	esw_create_tsar(esw);

	esw->mode = mode;

	mlx5_lag_update(esw->dev);

	if (mode == MLX5_ESWITCH_LEGACY) {
		err = esw_legacy_enable(esw);
	} else {
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
		err = esw_offloads_enable(esw);
	}

	if (err)
		goto abort;

	mlx5_eswitch_event_handlers_register(esw);

	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
		 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	return 0;

abort:
	esw->mode = MLX5_ESWITCH_NONE;

	if (mode == MLX5_ESWITCH_OFFLOADS) {
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
	}
	esw_destroy_tsar(esw);
	return err;
}

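/**
 * mlx5_eswitch_enable - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch for given number of VFs.
 *		Caller must pass num_vfs > 0 when enabling eswitch for
 *		vf vports.
 *
 * mlx5_eswitch_enable() returns 0 on success or error code on failure.
 */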
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
	int ret;

	if (!ESW_ALLOWED(esw))
		return 0;

	mutex_lock(&esw->mode_lock);
	if (esw->mode == MLX5_ESWITCH_NONE) {
		ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
	} else {
		enum mlx5_eswitch_vport_event vport_events;

		vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
				MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
		ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
		if (!ret)
			esw->esw_funcs.num_vfs = num_vfs;
	}
	mutex_unlock(&esw->mode_lock);
	return ret;
}

void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
{
	int old_mode;

	lockdep_assert_held(&esw->mode_lock);

	if (esw->mode == MLX5_ESWITCH_NONE)
		return;

	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	mlx5_eswitch_event_handlers_unregister(esw);

	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_disable(esw);
	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		esw_offloads_disable(esw);

	old_mode = esw->mode;
	esw->mode = MLX5_ESWITCH_NONE;

	mlx5_lag_update(esw->dev);

	if (old_mode == MLX5_ESWITCH_OFFLOADS) {
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
	}
	esw_destroy_tsar(esw);

	if (clear_vf)
		mlx5_eswitch_clear_vf_vports_info(esw);
}

void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
	if (!ESW_ALLOWED(esw))
		return;

	mutex_lock(&esw->mode_lock);
	mlx5_eswitch_disable_locked(esw, clear_vf);
	esw->esw_funcs.num_vfs = 0;
	mutex_unlock(&esw->mode_lock);
}

int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int total_vports;
	int err, i;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	total_vports = mlx5_eswitch_get_total_vports(dev);

	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 total_vports,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;
	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	esw->total_vports = total_vports;

	err = esw_offloads_init_reps(esw);
	if (err)
		goto abort;

	mutex_init(&esw->offloads.encap_tbl_lock);
	hash_init(esw->offloads.encap_tbl);
	mutex_init(&esw->offloads.decap_tbl_lock);
	hash_init(esw->offloads.decap_tbl);
	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
	atomic64_set(&esw->offloads.num_flows, 0);
	ida_init(&esw->offloads.vport_metadata_ida);
	mutex_init(&esw->state_lock);
	mutex_init(&esw->mode_lock);

	mlx5_esw_for_all_vports(esw, i, vport) {
		vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
	}

	esw->enabled_vports = 0;
	esw->mode = MLX5_ESWITCH_NONE;
	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;

	dev->priv.eswitch = esw;
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	esw_offloads_cleanup_reps(esw);
	kfree(esw->vports);
	kfree(esw);
	return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
		return;

	esw_info(esw->dev, "cleanup\n");

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	esw_offloads_cleanup_reps(esw);
	mutex_destroy(&esw->mode_lock);
	mutex_destroy(&esw->state_lock);
	ida_destroy(&esw->offloads.vport_metadata_ida);
	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
	mutex_destroy(&esw->offloads.encap_tbl_lock);
	mutex_destroy(&esw->offloads.decap_tbl_lock);
	kfree(esw->vports);
	kfree(esw);
}

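/* Vport Administration */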
1815static int
1816mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
1817 struct mlx5_vport *evport, const u8 *mac)
1818{
1819 u16 vport_num = evport->vport;
1820 u64 node_guid;
1821 int err = 0;
1822
1823 if (is_multicast_ether_addr(mac))
1824 return -EINVAL;
1825
1826 if (evport->info.spoofchk && !is_valid_ether_addr(mac))
1827 mlx5_core_warn(esw->dev,
1828 "Set invalid MAC while spoofchk is on, vport(%d)\n",
1829 vport_num);
1830
1831 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
1832 if (err) {
1833 mlx5_core_warn(esw->dev,
1834 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
1835 vport_num, err);
1836 return err;
1837 }
1838
1839 node_guid_gen_from_mac(&node_guid, mac);
1840 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
1841 if (err)
1842 mlx5_core_warn(esw->dev,
1843 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
1844 vport_num, err);
1845
1846 ether_addr_copy(evport->info.mac, mac);
1847 evport->info.node_guid = node_guid;
1848 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
1849 err = esw_acl_ingress_lgcy_setup(esw, evport);
1850
1851 return err;
1852}
1853
1854int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1855 u16 vport, const u8 *mac)
1856{
1857 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1858 int err = 0;
1859
1860 if (IS_ERR(evport))
1861 return PTR_ERR(evport);
1862
1863 mutex_lock(&esw->state_lock);
1864 err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
1865 mutex_unlock(&esw->state_lock);
1866 return err;
1867}
1868
1869static bool
1870is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
1871{
1872 return vport_num == MLX5_VPORT_PF ||
1873 mlx5_eswitch_is_vf_vport(esw, vport_num);
1874}
1875
1876int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
1877 struct devlink_port *port,
1878 u8 *hw_addr, int *hw_addr_len,
1879 struct netlink_ext_ack *extack)
1880{
1881 struct mlx5_eswitch *esw;
1882 struct mlx5_vport *vport;
1883 int err = -EOPNOTSUPP;
1884 u16 vport_num;
1885
1886 esw = mlx5_devlink_eswitch_get(devlink);
1887 if (IS_ERR(esw))
1888 return PTR_ERR(esw);
1889
1890 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
1891 if (!is_port_function_supported(esw, vport_num))
1892 return -EOPNOTSUPP;
1893
1894 vport = mlx5_eswitch_get_vport(esw, vport_num);
1895 if (IS_ERR(vport)) {
1896 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
1897 return PTR_ERR(vport);
1898 }
1899
1900 mutex_lock(&esw->state_lock);
1901 if (vport->enabled) {
1902 ether_addr_copy(hw_addr, vport->info.mac);
1903 *hw_addr_len = ETH_ALEN;
1904 err = 0;
1905 } else {
1906 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
1907 }
1908 mutex_unlock(&esw->state_lock);
1909 return err;
1910}
1911
1912int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
1913 struct devlink_port *port,
1914 const u8 *hw_addr, int hw_addr_len,
1915 struct netlink_ext_ack *extack)
1916{
1917 struct mlx5_eswitch *esw;
1918 struct mlx5_vport *vport;
1919 int err = -EOPNOTSUPP;
1920 u16 vport_num;
1921
1922 esw = mlx5_devlink_eswitch_get(devlink);
1923 if (IS_ERR(esw)) {
1924 NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
1925 return PTR_ERR(esw);
1926 }
1927
1928 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
1929 if (!is_port_function_supported(esw, vport_num)) {
1930 NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
1931 return -EINVAL;
1932 }
1933 vport = mlx5_eswitch_get_vport(esw, vport_num);
1934 if (IS_ERR(vport)) {
1935 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
1936 return PTR_ERR(vport);
1937 }
1938
1939 mutex_lock(&esw->state_lock);
1940 if (vport->enabled)
1941 err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr);
1942 else
1943 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
1944 mutex_unlock(&esw->state_lock);
1945 return err;
1946}
1947
1948int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1949 u16 vport, int link_state)
1950{
1951 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1952 int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
1953 int other_vport = 1;
1954 int err = 0;
1955
1956 if (!ESW_ALLOWED(esw))
1957 return -EPERM;
1958 if (IS_ERR(evport))
1959 return PTR_ERR(evport);
1960
1961 if (vport == MLX5_VPORT_UPLINK) {
1962 opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
1963 other_vport = 0;
1964 vport = 0;
1965 }
1966 mutex_lock(&esw->state_lock);
1967
1968 err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
1969 if (err) {
1970 mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
1971 vport, opmod, err);
1972 goto unlock;
1973 }
1974
1975 evport->info.link_state = link_state;
1976
1977unlock:
1978 mutex_unlock(&esw->state_lock);
1979 return err;
1980}
1981
1982int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1983 u16 vport, struct ifla_vf_info *ivi)
1984{
1985 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1986
1987 if (IS_ERR(evport))
1988 return PTR_ERR(evport);
1989
1990 memset(ivi, 0, sizeof(*ivi));
1991 ivi->vf = vport - 1;
1992
1993 mutex_lock(&esw->state_lock);
1994 ether_addr_copy(ivi->mac, evport->info.mac);
1995 ivi->linkstate = evport->info.link_state;
1996 ivi->vlan = evport->info.vlan;
1997 ivi->qos = evport->info.qos;
1998 ivi->spoofchk = evport->info.spoofchk;
1999 ivi->trusted = evport->info.trusted;
2000 ivi->min_tx_rate = evport->info.min_rate;
2001 ivi->max_tx_rate = evport->info.max_rate;
2002 mutex_unlock(&esw->state_lock);
2003
2004 return 0;
2005}
2006
2007int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2008 u16 vport, u16 vlan, u8 qos, u8 set_flags)
2009{
2010 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2011 int err = 0;
2012
2013 if (IS_ERR(evport))
2014 return PTR_ERR(evport);
2015 if (vlan > 4095 || qos > 7)
2016 return -EINVAL;
2017
2018 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
2019 if (err)
2020 return err;
2021
2022 evport->info.vlan = vlan;
2023 evport->info.qos = qos;
2024 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
2025 err = esw_acl_ingress_lgcy_setup(esw, evport);
2026 if (err)
2027 return err;
2028 err = esw_acl_egress_lgcy_setup(esw, evport);
2029 }
2030
2031 return err;
2032}
2033
2034int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2035 u16 vport, u16 vlan, u8 qos)
2036{
2037 u8 set_flags = 0;
2038 int err;
2039
2040 if (!ESW_ALLOWED(esw))
2041 return -EPERM;
2042
2043 if (vlan || qos)
2044 set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
2045
2046 mutex_lock(&esw->state_lock);
2047 err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
2048 mutex_unlock(&esw->state_lock);
2049
2050 return err;
2051}
2052
2053int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
2054 u16 vport, bool spoofchk)
2055{
2056 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2057 bool pschk;
2058 int err = 0;
2059
2060 if (!ESW_ALLOWED(esw))
2061 return -EPERM;
2062 if (IS_ERR(evport))
2063 return PTR_ERR(evport);
2064
2065 mutex_lock(&esw->state_lock);
2066 pschk = evport->info.spoofchk;
2067 evport->info.spoofchk = spoofchk;
2068 if (pschk && !is_valid_ether_addr(evport->info.mac))
2069 mlx5_core_warn(esw->dev,
2070 "Spoofchk in set while MAC is invalid, vport(%d)\n",
2071 evport->vport);
2072 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
2073 err = esw_acl_ingress_lgcy_setup(esw, evport);
2074 if (err)
2075 evport->info.spoofchk = pschk;
2076 mutex_unlock(&esw->state_lock);
2077
2078 return err;
2079}
2080
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.legacy.vepa_uplink_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);

	if (esw->fdb_table.legacy.vepa_star_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);

	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
	esw->fdb_table.legacy.vepa_star_rule = NULL;
}

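/* Install (or remove, for setting == 0) the two rules that implement
 * VEPA in the legacy FDB: a rule that sends traffic arriving from the
 * uplink into the regular FDB, and a catch-all "star" rule that reflects
 * everything else back out to the uplink so the adjacent switch performs
 * the actual forwarding. Caller must hold esw->state_lock.
 */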
static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
					 u8 setting)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	if (!setting) {
		esw_cleanup_vepa_rules(esw);
		return 0;
	}

	if (esw->fdb_table.legacy.vepa_uplink_rule)
		return 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

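	/* Uplink rule: match on source_port == uplink and forward the
	 * packet to the regular legacy FDB for normal processing.
	 */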
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->fdb_table.legacy.fdb;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}
	esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;

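	/* Star rule: no match criteria, i.e. match everything else, and
	 * send it back out through the uplink vport.
	 */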
	memset(&dest, 0, sizeof(dest));
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = MLX5_VPORT_UPLINK;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}
	esw->fdb_table.legacy.vepa_star_rule = flow_rule;

out:
	kvfree(spec);
	if (err)
		esw_cleanup_vepa_rules(esw);
	return err;
}

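/* Public entry point for VEPA. Takes the state lock and rejects the
 * request unless the eswitch is in legacy mode, where the VEPA FDB
 * rules apply.
 */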
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
{
	int err = 0;

	if (!esw)
		return -EOPNOTSUPP;

	if (!ESW_ALLOWED(esw))
		return -EPERM;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = _mlx5_eswitch_set_vepa_locked(esw, setting);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
	if (!esw)
		return -EOPNOTSUPP;

	if (!ESW_ALLOWED(esw))
		return -EPERM;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return -EOPNOTSUPP;

	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
	return 0;
}

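/* Mark a vport as trusted. Trust is consumed by the vport change handler,
 * e.g. when deciding whether to honor promiscuous/allmulti requests, so
 * the handler is re-run for enabled vports to apply the new setting.
 */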
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport, bool setting)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	evport->info.trusted = setting;
	if (evport->enabled)
		esw_vport_change_handle_locked(evport);
	mutex_unlock(&esw->state_lock);

	return 0;
}

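/* Pick the divider that maps the largest configured min_rate onto the
 * firmware's maximum TSAR bw_share value, so that after normalization
 * every vport's share fits in [MLX5_MIN_BW_SHARE, max_tsar_bw_share].
 * The result is clamped to at least 1 to avoid dividing by zero when
 * no minimum rates are configured.
 */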
static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 max_guarantee = 0;
	int i;

	mlx5_esw_for_all_vports(esw, i, evport) {
		if (!evport->enabled || evport->info.min_rate < max_guarantee)
			continue;
		max_guarantee = evport->info.min_rate;
	}

	return max_t(u32, max_guarantee / fw_max_bw_share, 1);
}

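/* Recompute every enabled vport's bw_share from its min_rate and the
 * common divider, and push changed values to firmware. As a worked
 * example (assuming max_tsar_bw_share == 100): min rates of 1000 and
 * 2000 in the driver's rate units give divider 2000 / 100 = 20, hence
 * shares of 50 and 100, i.e. a 1:2 guarantee ratio at full firmware
 * resolution.
 */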
static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 vport_max_rate;
	u32 vport_min_rate;
	u32 bw_share;
	int err;
	int i;

	mlx5_esw_for_all_vports(esw, i, evport) {
		if (!evport->enabled)
			continue;
		vport_min_rate = evport->info.min_rate;
		vport_max_rate = evport->info.max_rate;
		bw_share = MLX5_MIN_BW_SHARE;

		if (vport_min_rate)
			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
							 divider,
							 fw_max_bw_share);

		if (bw_share == evport->qos.bw_share)
			continue;

		err = esw_vport_qos_config(esw, evport, vport_max_rate,
					   bw_share);
		if (!err)
			evport->qos.bw_share = bw_share;
		else
			return err;
	}

	return 0;
}

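/* Set a vport's rate limits. min_rate is backed by TSAR bw_share (a
 * relative guarantee, capability esw_bw_share) and max_rate by the
 * absolute rate limiter (capability esw_rate_limit); each is validated
 * against the corresponding capability. Changing one vport's min_rate
 * may rescale every vport's bw_share, hence the renormalization pass.
 */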
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	u32 fw_max_bw_share;
	u32 previous_min_rate;
	u32 divider;
	bool min_rate_supported;
	bool max_rate_supported;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);

	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
		return -EOPNOTSUPP;

	mutex_lock(&esw->state_lock);

	if (min_rate == evport->info.min_rate)
		goto set_max_rate;

	previous_min_rate = evport->info.min_rate;
	evport->info.min_rate = min_rate;
	divider = calculate_vports_min_rate_divider(esw);
	err = normalize_vports_min_rate(esw, divider);
	if (err) {
		evport->info.min_rate = previous_min_rate;
		goto unlock;
	}

set_max_rate:
	if (max_rate == evport->info.max_rate)
		goto unlock;

	err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
	if (!err)
		evport->info.max_rate = max_rate;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

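/* Collect drop statistics for a legacy-mode vport. Directions are named
 * from the VF's point of view: packets dropped by the egress ACL on the
 * path towards the vport were never received by the VF (rx_dropped),
 * and ingress-ACL drops are packets the VF failed to transmit
 * (tx_dropped). Firmware "vport down" discards are added when the
 * corresponding capabilities are present.
 */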
static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
					       struct mlx5_vport *vport,
					       struct mlx5_vport_drop_stats *stats)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u64 rx_discard_vport_down, tx_discard_vport_down;
	u64 bytes = 0;
	int err = 0;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return 0;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto unlock;

	if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
		mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
			      &stats->rx_dropped, &bytes);

	if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_counter))
		mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
			      &stats->tx_dropped, &bytes);

	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		goto unlock;

	err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
					  &rx_discard_vport_down,
					  &tx_discard_vport_down);
	if (err)
		goto unlock;

	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
		stats->rx_dropped += rx_discard_vport_down;
	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		stats->tx_dropped += tx_discard_vport_down;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

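/* Fill ifla_vf_stats for a vport from the QUERY_VPORT_COUNTER command.
 * Unicast, multicast and broadcast counters are summed over both the
 * Ethernet and IB transports, and drop counters are merged in from the
 * legacy ACL counters queried above.
 */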
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 struct ifla_vf_stats *vf_stats)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_vport_drop_stats stats = {};
	int err = 0;
	u32 *out;

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
	MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
	if (err)
		goto free_out;

	#define MLX5_GET_CTR(p, x) \
		MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
	if (err)
		goto free_out;
	vf_stats->rx_dropped = stats.rx_dropped;
	vf_stats->tx_dropped = stats.tx_dropped;

free_out:
	kvfree(out);
	return err;
}

u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
{
	return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);

enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return ESW_ALLOWED(esw) ? esw->offloads.encap :
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);

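/* LAG can only bond two devices whose eswitches agree on a mode: either
 * both inactive (MLX5_ESWITCH_NONE) or both in switchdev offloads mode.
 * A legacy-mode eswitch on either side disqualifies the pair.
 */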
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
{
	if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
	     dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
	    (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
	     dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
		return true;

	return false;
}

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1)
{
	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}