// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies Ltd */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"

enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};
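
/* These priorities order the tables inside the FDB namespace: the VEPA table
 * (LEGACY_VEPA_PRIO == 0) is evaluated before the regular legacy FDB
 * (LEGACY_FDB_PRIO), so when VEPA is enabled its rules decide whether traffic
 * falls through to the FDB or is forwarded straight to the uplink; see
 * _mlx5_eswitch_set_vepa_locked() below.
 */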

static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	int err;

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* num FTE 2, num FG 2 */
	ft_attr.prio = LEGACY_VEPA_PRIO;
	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 2;
	fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
		return err;
	}
	esw->fdb_table.legacy.vepa_fdb = fdb;

	return 0;
}

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy FDB Table\n");
	if (!esw->fdb_table.legacy.fdb)
		return;

	if (esw->fdb_table.legacy.promisc_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	if (esw->fdb_table.legacy.allmulti_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	if (esw->fdb_table.legacy.addr_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);

	esw->fdb_table.legacy.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
	atomic64_set(&esw->user_count, 0);
}

static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
	ft_attr.max_fte = table_size;
	ft_attr.prio = LEGACY_FDB_PRIO;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.legacy.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forwards all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err)
		esw_destroy_legacy_fdb_table(esw);

	kvfree(flow_group_in);
	return err;
}
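
/* Resulting legacy FDB layout, as a sketch derived from the group index
 * ranges above:
 *
 *   fte 0 .. table_size - 3 : exact-match DMAC unicast/multicast rules
 *   fte table_size - 2      : allmulti catch-all rule (dmac[0] == 0x01)
 *   fte table_size - 1      : promisc catch-all rule, matched on source vport
 */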

static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy VEPA Table\n");
	if (!esw->fdb_table.legacy.vepa_fdb)
		return;

	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
	esw->fdb_table.legacy.vepa_fdb = NULL;
}

static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
	int err;

	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
	atomic64_set(&esw->user_count, 0);

	err = esw_create_legacy_vepa_table(esw);
	if (err)
		return err;

	err = esw_create_legacy_fdb_table(esw);
	if (err)
		esw_destroy_legacy_vepa_table(esw);

	return err;
}

static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.legacy.vepa_uplink_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);

	if (esw->fdb_table.legacy.vepa_star_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);

	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
	esw->fdb_table.legacy.vepa_star_rule = NULL;
}

static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}

#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
					MLX5_VPORT_MC_ADDR_CHANGE | \
					MLX5_VPORT_PROMISC_CHANGE)
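
/* Vport events legacy mode subscribes to when enabling PF/VF vports:
 * unicast/multicast address list changes and promiscuous mode changes.
 * Each such event re-runs the vport change handler, which reprograms the
 * vport's FDB rules to match the new configuration.
 */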

int esw_legacy_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int ret;

	ret = esw_create_legacy_table(esw);
	if (ret)
		return ret;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;

	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
	if (ret)
		esw_destroy_legacy_table(esw);
	return ret;
}

void esw_legacy_disable(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;

	mlx5_eswitch_disable_pf_vf_vports(esw);

	mc_promisc = &esw->mc_promisc;
	if (mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	esw_destroy_legacy_table(esw);
}

static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
					 u8 setting)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	if (!setting) {
		esw_cleanup_vepa_rules(esw);
		return 0;
	}

	if (esw->fdb_table.legacy.vepa_uplink_rule)
		return 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Uplink rule forwards uplink traffic to the legacy FDB */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->fdb_table.legacy.fdb;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	} else {
		esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
	}

	/* Star rule to forward all traffic to the uplink vport */
	memset(&dest, 0, sizeof(dest));
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = MLX5_VPORT_UPLINK;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	} else {
		esw->fdb_table.legacy.vepa_star_rule = flow_rule;
	}

out:
	kvfree(spec);
	if (err)
		esw_cleanup_vepa_rules(esw);
	return err;
}

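/* Illustrative usage from userspace (the netdev name "eth0" is an example,
 * not taken from this file): the VEPA/VEB choice reaches this function via
 * the driver's ndo_bridge_setlink callback, typically through the iproute2
 * "bridge" tool:
 *
 *   bridge link set dev eth0 hwmode vepa    # setting = 1
 *   bridge link set dev eth0 hwmode veb     # setting = 0
 */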
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
{
	int err = 0;

	if (!esw)
		return -EOPNOTSUPP;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = _mlx5_eswitch_set_vepa_locked(esw, setting);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
	if (!esw)
		return -EOPNOTSUPP;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return -EOPNOTSUPP;

	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
	return 0;
}

int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int ret;

	/* Only non-manager vports need ACLs in legacy mode */
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return 0;

	ret = esw_acl_ingress_lgcy_setup(esw, vport);
	if (ret)
		goto ingress_err;

	ret = esw_acl_egress_lgcy_setup(esw, vport);
	if (ret)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
	return ret;
}

void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return;

	esw_acl_egress_lgcy_cleanup(esw, vport);
	esw_acl_ingress_lgcy_cleanup(esw, vport);
}

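/* Drop statistics are reported from the VF's point of view: drops counted by
 * the eswitch egress ACL (traffic toward the vport) accumulate into
 * rx_dropped, and drops counted by the ingress ACL (traffic from the vport)
 * into tx_dropped, optionally augmented with the firmware's vport-down
 * discard counters when the corresponding capabilities are set.
 */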
int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
				    struct mlx5_vport *vport,
				    struct mlx5_vport_drop_stats *stats)
{
	u64 rx_discard_vport_down, tx_discard_vport_down;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u64 bytes = 0;
	int err = 0;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return 0;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto unlock;

	if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
		mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
			      &stats->rx_dropped, &bytes);

	if (vport->ingress.legacy.drop_counter)
		mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
			      &stats->tx_dropped, &bytes);

	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		goto unlock;

	err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
					  &rx_discard_vport_down,
					  &tx_discard_vport_down);
	if (err)
		goto unlock;

	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
		stats->rx_dropped += rx_discard_vport_down;
	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		stats->tx_dropped += tx_discard_vport_down;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

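/* VLAN/QoS configuration is a legacy-mode feature. In other eswitch modes,
 * clearing an already-absent VLAN (vlan == 0) is accepted as a no-op,
 * presumably so management stacks that unconditionally reset the VF VLAN do
 * not fail, while actually programming one returns -EOPNOTSUPP.
 */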
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos)
{
	u8 set_flags = 0;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	if (vlan || qos)
		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		if (!vlan)
			goto unlock;

		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

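/* Spoof checking is enforced through the vport ingress ACL: toggling it
 * rebuilds the ingress ACL rules, and the warning below flags the case where
 * spoofchk is enabled while the vport has no valid MAC to check against.
 */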
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	bool pschk;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	pschk = evport->info.spoofchk;
	evport->info.spoofchk = spoofchk;
	if (pschk && !is_valid_ether_addr(evport->info.mac))
		mlx5_core_warn(esw->dev,
			       "Spoofchk in set while MAC is invalid, vport(%d)\n",
			       evport->vport);
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);
	if (err)
		evport->info.spoofchk = pschk;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport, bool setting)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	evport->info.trusted = setting;
	if (evport->enabled)
		esw_vport_change_handle_locked(evport);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}