1
2
3
4#include <linux/list.h>
5#include <linux/notifier.h>
6#include <net/netevent.h>
7#include <net/switchdev.h>
8#include "lib/devcom.h"
9#include "bridge.h"
10#include "eswitch.h"
11#include "bridge_priv.h"
12#define CREATE_TRACE_POINTS
13#include "diag/bridge_tracepoint.h"
14
15#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
16#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
17#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 4 - 1)
18#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM \
19 (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
20#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO \
21 (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
22#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
23 (MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO + 1)
24#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)
25
26#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
27#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
28#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
29#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
30 (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
31#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2)
32#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
33 (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
34#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
35
36#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
37
/* Flow table levels within the FDB_BR_OFFLOAD priority; packets traverse
 * ingress -> egress, while the skip table is a bypass target (see
 * mlx5_esw_bridge_ingress_table_init()).
 */
enum {
	MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
};
43
/* Per-bridge FDB hash table parameters: entries are keyed by
 * struct mlx5_esw_bridge_fdb_key embedded in each FDB entry.
 */
static const struct rhashtable_params fdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
	.automatic_shrinking = true,
};
50
/* Bits for struct mlx5_esw_bridge::flags. */
enum {
	MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
};
54
/* Offload state for one Linux bridge device, identified by ifindex.
 * Instances are reference counted (see mlx5_esw_bridge_get/put) and linked
 * on mlx5_esw_bridge_offloads::bridges.
 */
struct mlx5_esw_bridge {
	int ifindex;				/* netdev ifindex of the bridge */
	int refcnt;				/* presumably serialized by RTNL (see ASSERT_RTNL in lookup) — confirm */
	struct list_head list;			/* node on br_offloads->bridges */
	struct mlx5_esw_bridge_offloads *br_offloads;

	struct list_head fdb_list;		/* all FDB entries of this bridge */
	struct rhashtable fdb_ht;		/* FDB entries keyed by MAC+VID */

	/* Per-bridge egress table with its flow groups; the miss group/rule
	 * and its packet reformat are optional (NULL when VLAN-pop reformat
	 * is unsupported or their setup failed).
	 */
	struct mlx5_flow_table *egress_ft;
	struct mlx5_flow_group *egress_vlan_fg;
	struct mlx5_flow_group *egress_mac_fg;
	struct mlx5_flow_group *egress_miss_fg;
	struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
	struct mlx5_flow_handle *egress_miss_handle;
	unsigned long ageing_time;		/* FDB ageing timeout, in jiffies */
	u32 flags;				/* MLX5_ESW_BRIDGE_*_FLAG bits */
};
73
74static void
75mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
76 unsigned long val)
77{
78 struct switchdev_notifier_fdb_info send_info = {};
79
80 send_info.addr = addr;
81 send_info.vid = vid;
82 send_info.offloaded = true;
83 call_switchdev_notifiers(val, dev, &send_info.info, NULL);
84}
85
86static void
87mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
88{
89 if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER)))
90 mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
91 entry->key.vid,
92 SWITCHDEV_FDB_DEL_TO_BRIDGE);
93}
94
95static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
96{
97 return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
98 MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
99 MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
100 offsetof(struct vlan_ethhdr, h_vlan_proto);
101}
102
103static struct mlx5_pkt_reformat *
104mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
105{
106 struct mlx5_pkt_reformat_params reformat_params = {};
107
108 reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
109 reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
110 reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
111 reformat_params.size = sizeof(struct vlan_hdr);
112 return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
113}
114
115static struct mlx5_flow_table *
116mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
117{
118 struct mlx5_flow_table_attr ft_attr = {};
119 struct mlx5_core_dev *dev = esw->dev;
120 struct mlx5_flow_namespace *ns;
121 struct mlx5_flow_table *fdb;
122
123 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
124 if (!ns) {
125 esw_warn(dev, "Failed to get FDB namespace\n");
126 return ERR_PTR(-ENOENT);
127 }
128
129 ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
130 ft_attr.max_fte = max_fte;
131 ft_attr.level = level;
132 ft_attr.prio = FDB_BR_OFFLOAD;
133 fdb = mlx5_create_flow_table(ns, &ft_attr);
134 if (IS_ERR(fdb))
135 esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
136
137 return fdb;
138}
139
140static struct mlx5_flow_group *
141mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
142{
143 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
144 struct mlx5_flow_group *fg;
145 u32 *in, *match;
146
147 in = kvzalloc(inlen, GFP_KERNEL);
148 if (!in)
149 return ERR_PTR(-ENOMEM);
150
151 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
152 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
153 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
154
155 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
156 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
157 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
158 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
159
160 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
161 mlx5_eswitch_get_vport_metadata_mask());
162
163 MLX5_SET(create_flow_group_in, in, start_flow_index,
164 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
165 MLX5_SET(create_flow_group_in, in, end_flow_index,
166 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
167
168 fg = mlx5_create_flow_group(ingress_ft, in);
169 kvfree(in);
170 if (IS_ERR(fg))
171 esw_warn(esw->dev,
172 "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
173 PTR_ERR(fg));
174
175 return fg;
176}
177
178static struct mlx5_flow_group *
179mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
180 struct mlx5_flow_table *ingress_ft)
181{
182 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
183 struct mlx5_flow_group *fg;
184 u32 *in, *match;
185
186 in = kvzalloc(inlen, GFP_KERNEL);
187 if (!in)
188 return ERR_PTR(-ENOMEM);
189
190 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
191 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
192 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
193
194 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
195 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
196 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
197
198 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
199 mlx5_eswitch_get_vport_metadata_mask());
200
201 MLX5_SET(create_flow_group_in, in, start_flow_index,
202 MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
203 MLX5_SET(create_flow_group_in, in, end_flow_index,
204 MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);
205
206 fg = mlx5_create_flow_group(ingress_ft, in);
207 if (IS_ERR(fg))
208 esw_warn(esw->dev,
209 "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
210 PTR_ERR(fg));
211
212 kvfree(in);
213 return fg;
214}
215
216static struct mlx5_flow_group *
217mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
218{
219 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
220 struct mlx5_flow_group *fg;
221 u32 *in, *match;
222
223 in = kvzalloc(inlen, GFP_KERNEL);
224 if (!in)
225 return ERR_PTR(-ENOMEM);
226
227 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
228 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
229 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
230
231 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
232 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
233
234 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
235 mlx5_eswitch_get_vport_metadata_mask());
236
237 MLX5_SET(create_flow_group_in, in, start_flow_index,
238 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
239 MLX5_SET(create_flow_group_in, in, end_flow_index,
240 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);
241
242 fg = mlx5_create_flow_group(ingress_ft, in);
243 if (IS_ERR(fg))
244 esw_warn(esw->dev,
245 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
246 PTR_ERR(fg));
247
248 kvfree(in);
249 return fg;
250}
251
252static struct mlx5_flow_group *
253mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
254{
255 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
256 struct mlx5_flow_group *fg;
257 u32 *in, *match;
258
259 in = kvzalloc(inlen, GFP_KERNEL);
260 if (!in)
261 return ERR_PTR(-ENOMEM);
262
263 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
264 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
265
266 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
267 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
268 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
269 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
270
271 MLX5_SET(create_flow_group_in, in, start_flow_index,
272 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
273 MLX5_SET(create_flow_group_in, in, end_flow_index,
274 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);
275
276 fg = mlx5_create_flow_group(egress_ft, in);
277 if (IS_ERR(fg))
278 esw_warn(esw->dev,
279 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
280 PTR_ERR(fg));
281 kvfree(in);
282 return fg;
283}
284
285static struct mlx5_flow_group *
286mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
287{
288 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
289 struct mlx5_flow_group *fg;
290 u32 *in, *match;
291
292 in = kvzalloc(inlen, GFP_KERNEL);
293 if (!in)
294 return ERR_PTR(-ENOMEM);
295
296 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
297 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
298
299 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
300 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
301
302 MLX5_SET(create_flow_group_in, in, start_flow_index,
303 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
304 MLX5_SET(create_flow_group_in, in, end_flow_index,
305 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);
306
307 fg = mlx5_create_flow_group(egress_ft, in);
308 if (IS_ERR(fg))
309 esw_warn(esw->dev,
310 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
311 PTR_ERR(fg));
312 kvfree(in);
313 return fg;
314}
315
316static struct mlx5_flow_group *
317mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
318{
319 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
320 struct mlx5_flow_group *fg;
321 u32 *in, *match;
322
323 in = kvzalloc(inlen, GFP_KERNEL);
324 if (!in)
325 return ERR_PTR(-ENOMEM);
326
327 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
328 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
329
330 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
331
332 MLX5_SET(create_flow_group_in, in, start_flow_index,
333 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
334 MLX5_SET(create_flow_group_in, in, end_flow_index,
335 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);
336
337 fg = mlx5_create_flow_group(egress_ft, in);
338 if (IS_ERR(fg))
339 esw_warn(esw->dev,
340 "Failed to create bridge egress table miss flow group (err=%ld)\n",
341 PTR_ERR(fg));
342 kvfree(in);
343 return fg;
344}
345
/* Create the shared bridge ingress infrastructure: the ingress FDB table,
 * the zero-sized "skip" table used as a bypass destination, and the three
 * ingress flow groups (VLAN, filter, MAC). Stores everything in
 * @br_offloads on success. Requires vport match metadata to be enabled.
 * Returns 0 or a negative errno; on failure all partially created objects
 * are destroyed via the goto unwind ladder.
 */
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
	struct mlx5_flow_table *ingress_ft, *skip_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	int err;

	/* Ingress rules match on vport metadata in reg C0. */
	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return -EOPNOTSUPP;

	ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
						  MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
						  esw);
	if (IS_ERR(ingress_ft))
		return PTR_ERR(ingress_ft);

	/* Zero-entry table packets are forwarded to in order to skip
	 * further bridge processing.
	 */
	skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
					       MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
					       esw);
	if (IS_ERR(skip_ft)) {
		err = PTR_ERR(skip_ft);
		goto err_skip_tbl;
	}

	vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(filter_fg)) {
		err = PTR_ERR(filter_fg);
		goto err_filter_fg;
	}

	mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	br_offloads->ingress_ft = ingress_ft;
	br_offloads->skip_ft = skip_ft;
	br_offloads->ingress_vlan_fg = vlan_fg;
	br_offloads->ingress_filter_fg = filter_fg;
	br_offloads->ingress_mac_fg = mac_fg;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(filter_fg);
err_filter_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(skip_ft);
err_skip_tbl:
	mlx5_destroy_flow_table(ingress_ft);
	return err;
}
406
/* Tear down everything created by mlx5_esw_bridge_ingress_table_init(), in
 * reverse order (groups before tables), and NULL the pointers so the
 * tables can be lazily recreated on the next bridge lookup.
 */
static void
mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
	br_offloads->ingress_mac_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_filter_fg);
	br_offloads->ingress_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
	br_offloads->ingress_vlan_fg = NULL;
	mlx5_destroy_flow_table(br_offloads->skip_ft);
	br_offloads->skip_ft = NULL;
	mlx5_destroy_flow_table(br_offloads->ingress_ft);
	br_offloads->ingress_ft = NULL;
}
421
422static struct mlx5_flow_handle *
423mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
424 struct mlx5_flow_table *skip_ft,
425 struct mlx5_pkt_reformat *pkt_reformat);
426
/* Create the per-bridge egress table with its VLAN and MAC flow groups.
 * When VLAN-pop reformat is supported, additionally set up the optional
 * miss path (group + REMOVE_HEADER reformat + rule forwarding to the skip
 * table); miss-path failures are logged and skipped rather than failing
 * the whole init, leaving the miss members NULL. Returns 0 or a negative
 * errno.
 */
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
				  struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg;
	struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
	struct mlx5_flow_handle *miss_handle = NULL;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_table *egress_ft;
	int err;

	egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
						 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
						 esw);
	if (IS_ERR(egress_ft))
		return PTR_ERR(egress_ft);

	vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
		/* Best-effort miss path: on any failure, undo what was
		 * created so far, reset the pointers to NULL and continue
		 * without it.
		 */
		miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
		if (IS_ERR(miss_fg)) {
			esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
				 PTR_ERR(miss_fg));
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
		if (IS_ERR(miss_pkt_reformat)) {
			esw_warn(esw->dev,
				 "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
				 PTR_ERR(miss_pkt_reformat));
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
								      br_offloads->skip_ft,
								      miss_pkt_reformat);
		if (IS_ERR(miss_handle)) {
			esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
				 PTR_ERR(miss_handle));
			miss_handle = NULL;
			mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}
	}
skip_miss_flow:

	bridge->egress_ft = egress_ft;
	bridge->egress_vlan_fg = vlan_fg;
	bridge->egress_mac_fg = mac_fg;
	bridge->egress_miss_fg = miss_fg;
	bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
	bridge->egress_miss_handle = miss_handle;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(egress_ft);
	return err;
}
506
/* Destroy the per-bridge egress objects in reverse creation order. The
 * miss-path members may be NULL (unsupported or failed best-effort setup)
 * and are checked individually.
 */
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
	if (bridge->egress_miss_handle)
		mlx5_del_flow_rules(bridge->egress_miss_handle);
	if (bridge->egress_miss_pkt_reformat)
		mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
					     bridge->egress_miss_pkt_reformat);
	if (bridge->egress_miss_fg)
		mlx5_destroy_flow_group(bridge->egress_miss_fg);
	mlx5_destroy_flow_group(bridge->egress_mac_fg);
	mlx5_destroy_flow_group(bridge->egress_vlan_fg);
	mlx5_destroy_flow_table(bridge->egress_ft);
}
521
/* Add an ingress FDB rule matching source MAC @addr (and, when @vlan is
 * set, either applying its VLAN push reformat + mod-header mark or matching
 * its VID) for traffic from @vport_num of @esw. The rule forwards to the
 * bridge egress table and counts hits against @counter_id. Returns the rule
 * handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
					     struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
					     struct mlx5_esw_bridge *bridge,
					     struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dests[2] = {};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact match on the full source MAC. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Restrict to packets originating from @vport_num, identified by
	 * the vport metadata in reg C0. @esw may be the peer eswitch.
	 */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));

	if (vlan && vlan->pkt_reformat_push) {
		/* Push VLAN tag onto untagged traffic and mark it via
		 * mod-header so the egress miss path can recognize it.
		 */
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.pkt_reformat = vlan->pkt_reformat_push;
		flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
	} else if (vlan) {
		/* No push configured: match tagged packets with this VID. */
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	/* Forward to the bridge's egress table and count. */
	dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dests[0].ft = bridge->egress_ft;
	dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dests[1].counter_id = counter_id;

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
				     ARRAY_SIZE(dests));

	kvfree(rule_spec);
	return handle;
}
583
584static struct mlx5_flow_handle *
585mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
586 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
587 struct mlx5_esw_bridge *bridge)
588{
589 return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
590 bridge, bridge->br_offloads->esw);
591}
592
593static struct mlx5_flow_handle *
594mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr,
595 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
596 struct mlx5_esw_bridge *bridge)
597{
598 struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
599 static struct mlx5_flow_handle *handle;
600 struct mlx5_eswitch *peer_esw;
601
602 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
603 if (!peer_esw)
604 return ERR_PTR(-ENODEV);
605
606 handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
607 bridge, peer_esw);
608
609 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
610 return handle;
611}
612
/* Add an ingress filter rule: any *tagged* packet from @vport_num with
 * source MAC @addr is forwarded to the skip table, bypassing bridge
 * processing. Used together with MAC-only rules so that tagged traffic is
 * not handled by them. Returns the rule handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
					   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = br_offloads->skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact match on the full source MAC. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Restrict to the source vport via metadata reg C0. */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));

	/* Match tag presence only — any VID. */
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
			 outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
			 outer_headers.cvlan_tag);

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
658
/* Add an egress FDB rule forwarding packets with destination MAC @addr
 * (and, if @vlan is set, matching its VID and optionally popping the tag)
 * to @vport_num, qualified by @esw_owner_vhca_id on merged-eswitch setups.
 * Returns the rule handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr,
				   struct mlx5_esw_bridge_vlan *vlan,
				   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = vport_num,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	/* For uplink-bound rules, mark flow source as local vport when the
	 * device supports it — NOTE(review): presumably to steer hardware
	 * matching; confirm against mlx5 flow_source semantics.
	 */
	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	/* Exact match on the full destination MAC. */
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (vlan) {
		/* Pop the tag on egress when the VLAN is configured for it. */
		if (vlan->pkt_reformat_pop) {
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act.pkt_reformat = vlan->pkt_reformat_pop;
		}

		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	/* On merged eswitch, destination vport must carry the owner vHCA id. */
	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
718
/* Add the egress miss rule: packets whose metadata reg C1 carries the
 * "VLAN pushed on ingress" mark get the push undone via @pkt_reformat
 * (REMOVE_HEADER) and are forwarded to @skip_ft to bypass further bridge
 * processing. Returns the rule handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
					struct mlx5_flow_table *skip_ft,
					struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
		.flags = FLOW_ACT_NO_APPEND,
		.pkt_reformat = pkt_reformat,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	/* Match the ingress push-VLAN mark in reg C1 (within ESW_TUN_MASK). */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);

	handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
753
/* Allocate and initialize a new bridge object for @ifindex: create its
 * egress table, init the FDB hash table and lists, set the default ageing
 * time and link it on br_offloads->bridges with refcnt 1. Returns the
 * bridge or an ERR_PTR.
 */
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
						      struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	bridge->br_offloads = br_offloads;
	err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
	if (err)
		goto err_egress_tbl;

	err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
	if (err)
		goto err_fdb_ht;

	INIT_LIST_HEAD(&bridge->fdb_list);
	bridge->ifindex = ifindex;
	bridge->refcnt = 1;
	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
	list_add(&bridge->list, &br_offloads->bridges);

	return bridge;

err_fdb_ht:
	mlx5_esw_bridge_egress_table_cleanup(bridge);
err_egress_tbl:
	kvfree(bridge);
	return ERR_PTR(err);
}
787
/* Take a reference on @bridge. Plain increment — presumably serialized by
 * RTNL (see ASSERT_RTNL() in mlx5_esw_bridge_lookup()); confirm.
 */
static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
{
	bridge->refcnt++;
}
792
/* Drop a reference on @bridge; on the last put, destroy its egress table,
 * unlink and free it. When the final bridge goes away, the shared ingress
 * tables are torn down too so they can be recreated lazily later.
 */
static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
				struct mlx5_esw_bridge *bridge)
{
	if (--bridge->refcnt)
		return;

	mlx5_esw_bridge_egress_table_cleanup(bridge);
	list_del(&bridge->list);
	rhashtable_destroy(&bridge->fdb_ht);
	kvfree(bridge);

	if (list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
}
807
/* Find the bridge for @ifindex and take a reference, or create it. The
 * shared ingress tables are created lazily before the first bridge; if
 * bridge creation then fails with no bridges left, they are torn down
 * again. Must be called under RTNL. Returns the bridge or an ERR_PTR.
 */
static struct mlx5_esw_bridge *
mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;

	ASSERT_RTNL();

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		if (bridge->ifindex == ifindex) {
			mlx5_esw_bridge_get(bridge);
			return bridge;
		}
	}

	/* First bridge: lazily create the shared ingress infrastructure. */
	if (!br_offloads->ingress_ft) {
		int err = mlx5_esw_bridge_ingress_table_init(br_offloads);

		if (err)
			return ERR_PTR(err);
	}

	bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
	if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
	return bridge;
}
834
835static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id)
836{
837 return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
838}
839
/* Xarray key for @port, derived from its vport number and owner vHCA id. */
static unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
{
	return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id);
}
844
/* Insert @port into the offloads port xarray; fails (e.g. -EBUSY) if the
 * key is already present.
 */
static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_offloads *br_offloads)
{
	return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
}
850
/* Look up a port by vport number and owner vHCA id; NULL if not present. */
static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
			    struct mlx5_esw_bridge_offloads *br_offloads)
{
	return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num,
									      esw_owner_vhca_id));
}
858
/* Remove @port from the offloads port xarray (does not free the port). */
static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_offloads *br_offloads)
{
	xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port));
}
864
/* Re-announce an active FDB entry to the bridge (SWITCHDEV_FDB_ADD_TO_BRIDGE)
 * so its software ageing timer is refreshed.
 */
static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry)
{
	trace_mlx5_esw_bridge_fdb_entry_refresh(entry);

	mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
					   entry->key.vid,
					   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
873
/* Fully dismantle an FDB entry: remove it from the hash table, delete its
 * egress/filter/ingress rules (filter_handle may be NULL), destroy its
 * ingress counter, unlink it from the VLAN and bridge lists and free it.
 */
static void
mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
				  struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);

	rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	mlx5_del_flow_rules(entry->egress_handle);
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
	mlx5_del_flow_rules(entry->ingress_handle);
	mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
	list_del(&entry->vlan_list);
	list_del(&entry->list);
	kvfree(entry);
}
890
/* Notify the bridge of the entry's removal (unless user-added or peer),
 * then dismantle it.
 */
static void
mlx5_esw_bridge_fdb_entry_notify_and_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
					     struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_fdb_del_notify(entry);
	mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
898
/* Remove (and notify about) every FDB entry of @bridge. Uses the _safe
 * iterator since each cleanup unlinks the entry from fdb_list.
 */
static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
906
/* Look up the VLAN object for @vid on @port; NULL if not configured. */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
{
	return xa_load(&port->vlans, vid);
}
912
913static int
914mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
915{
916 struct {
917 __be16 h_vlan_proto;
918 __be16 h_vlan_TCI;
919 } vlan_hdr = { htons(ETH_P_8021Q), htons(vlan->vid) };
920 struct mlx5_pkt_reformat_params reformat_params = {};
921 struct mlx5_pkt_reformat *pkt_reformat;
922
923 if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert)) ||
924 MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
925 MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
926 offsetof(struct vlan_ethhdr, h_vlan_proto)) {
927 esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
928 return -EOPNOTSUPP;
929 }
930
931 reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
932 reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
933 reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
934 reformat_params.size = sizeof(vlan_hdr);
935 reformat_params.data = &vlan_hdr;
936 pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
937 &reformat_params,
938 MLX5_FLOW_NAMESPACE_FDB);
939 if (IS_ERR(pkt_reformat)) {
940 esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
941 PTR_ERR(pkt_reformat));
942 return PTR_ERR(pkt_reformat);
943 }
944
945 vlan->pkt_reformat_push = pkt_reformat;
946 return 0;
947}
948
/* Release the VLAN-push reformat context and clear the pointer so cleanup
 * paths that test pkt_reformat_push do not double-free it.
 */
static void
mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
	vlan->pkt_reformat_push = NULL;
}
955
956static int
957mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
958{
959 struct mlx5_pkt_reformat *pkt_reformat;
960
961 if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
962 esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
963 return -EOPNOTSUPP;
964 }
965
966 pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
967 if (IS_ERR(pkt_reformat)) {
968 esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
969 PTR_ERR(pkt_reformat));
970 return PTR_ERR(pkt_reformat);
971 }
972
973 vlan->pkt_reformat_pop = pkt_reformat;
974 return 0;
975}
976
/* Release the VLAN-pop reformat context and clear the pointer so cleanup
 * paths that test pkt_reformat_pop do not double-free it.
 */
static void
mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
	vlan->pkt_reformat_pop = NULL;
}
983
/* Allocate a modify-header action that marks packets (via metadata register
 * REG_C_1) as needing a VLAN push, stored in vlan->pkt_mod_hdr_push_mark.
 */
static int
mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_modify_hdr *pkt_mod_hdr;

	/* Write ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN into the tunnel-id/opts
	 * field of REG_C_1 (offset 8, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS
	 * wide) — presumably matched later on the egress path; confirm
	 * against the egress flow rules.
	 */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(set_action_in, action, offset, 8);
	MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
	MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);

	pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
	if (IS_ERR(pkt_mod_hdr))
		return PTR_ERR(pkt_mod_hdr);

	vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
	return 0;
}
1003
/* Release the push-mark modify-header action and clear the pointer so
 * cleanup paths that test pkt_mod_hdr_push_mark do not double-free it.
 */
static void
mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
	vlan->pkt_mod_hdr_push_mark = NULL;
}
1010
/* Create a VLAN entry for @vid on @port and insert it into the port's VLAN
 * xarray. PVID VLANs get push + push-mark actions; UNTAGGED VLANs get a pop
 * action. Returns the new entry or an ERR_PTR on failure.
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
			    struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_vlan *vlan;
	int err;

	vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	INIT_LIST_HEAD(&vlan->fdb_list);

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
		if (err)
			goto err_vlan_push;

		err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
		if (err)
			goto err_vlan_push_mark;
	}
	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
		if (err)
			goto err_vlan_pop;
	}

	err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
	if (err)
		goto err_xa_insert;

	trace_mlx5_esw_bridge_vlan_create(vlan);
	return vlan;

	/* Unwind in reverse creation order; the NULL checks are needed
	 * because each action is only created for certain flag combinations.
	 */
err_xa_insert:
	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
err_vlan_pop:
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
err_vlan_push_mark:
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push:
	kvfree(vlan);
	return ERR_PTR(err);
}
1061
/* Remove @vlan from the port's VLAN xarray (does not free the entry). */
static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	xa_erase(&port->vlans, vlan->vid);
}
1067
/* Tear down everything hanging off @vlan: first every FDB entry on its
 * fdb_list (with notification), then any push/push-mark/pop actions that
 * were created for it.
 */
static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
				       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);

	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
}
1084
/* Fully destroy @vlan: flush its FDB entries and HW actions, unlink it from
 * the port's xarray, and free it.
 */
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan,
					 struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_vlan_cleanup(vlan);
	mlx5_esw_bridge_vlan_flush(vlan, bridge);
	mlx5_esw_bridge_vlan_erase(port, vlan);
	kvfree(vlan);
}
1094
/* Destroy every VLAN configured on @port. xa_for_each tolerates entries
 * being erased during iteration, which vlan_cleanup does.
 */
static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
					     struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
}
1104
/* Resolve the VLAN entry for (@vid, @vport_num, @esw_owner_vhca_id) by first
 * finding the bridge port, then the VLAN on that port. Returns the VLAN or
 * ERR_PTR(-EINVAL) if either lookup fails.
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads);
	if (!port) {
		/* NOTE(review): logged at info (not warn) level, suggesting a
		 * missing port is an expected transient condition (e.g. port
		 * removed concurrently) — confirm with callers.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (!vlan) {
		/* NOTE(review): same rationale as above — a missing VLAN
		 * appears to be tolerated rather than treated as a driver
		 * error; confirm.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
			 vport_num);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}
1133
1134static struct mlx5_esw_bridge_fdb_entry *
1135mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge *bridge,
1136 const unsigned char *addr, u16 vid)
1137{
1138 struct mlx5_esw_bridge_fdb_key key = {};
1139
1140 ether_addr_copy(key.addr, addr);
1141 key.vid = vid;
1142 return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
1143}
1144
/* Create and offload a new FDB entry for (@addr, @vid) on @vport_num:
 * allocates a flow counter, ingress rule (peer or local variant), optional
 * VLAN-filter rule, egress rule, and inserts the entry into the hashtable
 * and the bridge/VLAN lists. Any pre-existing entry with the same key is
 * notified and destroyed first. Returns the entry or an ERR_PTR.
 */
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
			       const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
			       struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan = NULL;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_flow_handle *handle;
	struct mlx5_fc *counter;
	int err;

	/* VLAN-tagged entries on a filtering bridge must map to a configured
	 * port VLAN; vid 0 (untagged) needs no VLAN context.
	 */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
		vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge,
							esw);
		if (IS_ERR(vlan))
			return ERR_CAST(vlan);
	}

	/* Replace semantics: drop any existing entry with the same key. */
	entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid);
	if (entry)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	entry->dev = dev;
	entry->vport_num = vport_num;
	entry->esw_owner_vhca_id = esw_owner_vhca_id;
	entry->lastuse = jiffies;
	if (added_by_user)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
	if (peer)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER;

	/* Counter attached to the ingress rule; its lastuse timestamp drives
	 * entry aging in mlx5_esw_bridge_update().
	 */
	counter = mlx5_fc_create(esw->dev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_ingress_fc_create;
	}
	entry->ingress_counter = counter;

	handle = peer ?
		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan,
							 mlx5_fc_id(counter), bridge) :
		mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
						    mlx5_fc_id(counter), bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_ingress_flow_create;
	}
	entry->ingress_handle = handle;

	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
		handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
				 vport_num, err);
			goto err_ingress_filter_flow_create;
		}
		entry->filter_handle = handle;
	}

	handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan,
						    bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_egress_flow_create;
	}
	entry->egress_handle = handle;

	err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	if (err) {
		esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
		goto err_ht_init;
	}

	/* Entries without a VLAN context still need a valid (empty) vlan_list
	 * node so cleanup's list_del() is safe.
	 */
	if (vlan)
		list_add(&entry->vlan_list, &vlan->fdb_list);
	else
		INIT_LIST_HEAD(&entry->vlan_list);
	list_add(&entry->list, &bridge->fdb_list);

	trace_mlx5_esw_bridge_fdb_entry_init(entry);
	return entry;

	/* Unwind in reverse acquisition order. */
err_ht_init:
	mlx5_del_flow_rules(entry->egress_handle);
err_egress_flow_create:
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
err_ingress_filter_flow_create:
	mlx5_del_flow_rules(entry->ingress_handle);
err_ingress_flow_create:
	mlx5_fc_destroy(esw->dev, entry->ingress_counter);
err_ingress_fc_create:
	kvfree(entry);
	return ERR_PTR(err);
}
1251
/* Set the FDB ageing time (given in clock_t units) on the bridge that owns
 * the specified vport. Returns -EINVAL when the port is not attached.
 */
int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
				    struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *port;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return -EINVAL;

	port->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
	return 0;
}
1264
1265int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
1266 struct mlx5_esw_bridge_offloads *br_offloads)
1267{
1268 struct mlx5_esw_bridge_port *port;
1269 struct mlx5_esw_bridge *bridge;
1270 bool filtering;
1271
1272 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1273 if (!port)
1274 return -EINVAL;
1275
1276 bridge = port->bridge;
1277 filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1278 if (filtering == enable)
1279 return 0;
1280
1281 mlx5_esw_bridge_fdb_flush(bridge);
1282 if (enable)
1283 bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1284 else
1285 bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1286
1287 return 0;
1288}
1289
/* Allocate a bridge-port object for @vport_num, attach it to @bridge, and
 * register it in the offloads port xarray. Returns 0 or a negative errno.
 */
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_port *port;
	int err;

	port = kvzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->vport_num = vport_num;
	port->esw_owner_vhca_id = esw_owner_vhca_id;
	port->bridge = bridge;
	port->flags |= flags;
	xa_init(&port->vlans);
	err = mlx5_esw_bridge_port_insert(port, br_offloads);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
			 port->vport_num, port->esw_owner_vhca_id, err);
		goto err_port_insert;
	}
	trace_mlx5_esw_bridge_vport_init(port);

	return 0;

err_port_insert:
	kvfree(port);
	return err;
}
1322
/* Detach @port from its bridge: destroy all of this vport's FDB entries
 * (without switchdev notification — NOTE(review): deliberate asymmetry with
 * mlx5_esw_bridge_fdb_flush, presumably because the bridge side already
 * knows the port is going away; confirm), flush its VLANs, remove it from
 * the port xarray, and drop the bridge reference. Always returns 0.
 */
static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
					 struct mlx5_esw_bridge_port *port)
{
	u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id;
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id)
			mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);

	trace_mlx5_esw_bridge_vport_cleanup(port);
	mlx5_esw_bridge_port_vlans_flush(port, bridge);
	mlx5_esw_bridge_port_erase(port, br_offloads);
	kvfree(port);
	/* May free the bridge itself if this was its last port reference. */
	mlx5_esw_bridge_put(br_offloads, bridge);
	return 0;
}
1341
1342static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
1343 u16 flags,
1344 struct mlx5_esw_bridge_offloads *br_offloads,
1345 struct netlink_ext_ack *extack)
1346{
1347 struct mlx5_esw_bridge *bridge;
1348 int err;
1349
1350 bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
1351 if (IS_ERR(bridge)) {
1352 NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
1353 return PTR_ERR(bridge);
1354 }
1355
1356 err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge);
1357 if (err) {
1358 NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
1359 goto err_vport;
1360 }
1361 return 0;
1362
1363err_vport:
1364 mlx5_esw_bridge_put(br_offloads, bridge);
1365 return err;
1366}
1367
/* Public entry point: link a local (non-peer) vport to a bridge. */
int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
			       struct mlx5_esw_bridge_offloads *br_offloads,
			       struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, 0,
						     br_offloads, extack);
}
1375
/* Detach a vport from the bridge identified by @ifindex. Fails with -EINVAL
 * if the port is not attached at all or is attached to a different bridge.
 */
int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge_offloads *br_offloads,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port;
	int err;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port) {
		NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
		return -EINVAL;
	}
	if (port->bridge->ifindex != ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
		return -EINVAL;
	}

	err = mlx5_esw_bridge_vport_cleanup(br_offloads, port);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
	return err;
}
1398
/* Link a vport belonging to a peer eswitch. Silently succeeds without
 * offloading when the merged_eswitch capability is absent, since peer
 * forwarding cannot be offloaded in that case.
 */
int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				    struct mlx5_esw_bridge_offloads *br_offloads,
				    struct netlink_ext_ack *extack)
{
	if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
		return 0;

	return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id,
						     MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
						     br_offloads, extack);
}
1410
/* Unlink a peer-eswitch vport; identical to the regular unlink path (the
 * lookup below simply finds nothing when peer link was skipped).
 */
int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, br_offloads,
					    extack);
}
1418
/* Add (or re-create with new flags) VLAN @vid on the given vport's bridge
 * port. An existing entry with identical flags is left untouched; one with
 * different flags is destroyed and rebuilt, since its HW actions depend on
 * the PVID/UNTAGGED flags. Returns 0 or a negative errno.
 */
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
				  struct mlx5_esw_bridge_offloads *br_offloads,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return -EINVAL;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;
		mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
	}

	vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, br_offloads->esw);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
		return PTR_ERR(vlan);
	}
	return 0;
}
1444
1445void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
1446 struct mlx5_esw_bridge_offloads *br_offloads)
1447{
1448 struct mlx5_esw_bridge_port *port;
1449 struct mlx5_esw_bridge_vlan *vlan;
1450
1451 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1452 if (!port)
1453 return;
1454
1455 vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
1456 if (!vlan)
1457 return;
1458 mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
1459}
1460
/* Refresh the software lastuse timestamp of the FDB entry named by
 * @fdb_info, keeping it from aging out. Peer ports are skipped — their
 * entries are owned by the other eswitch.
 */
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				     struct mlx5_esw_bridge_offloads *br_offloads,
				     struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
		return;

	bridge = port->bridge;
	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (!entry) {
		/* debug (not warn): the entry may have been aged out or
		 * removed concurrently with the notification.
		 */
		esw_debug(br_offloads->esw->dev,
			  "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			  fdb_info->addr, fdb_info->vid, vport_num);
		return;
	}

	entry->lastuse = jiffies;
}
1484
/* Offload the FDB entry described by @fdb_info on the given vport, then
 * notify switchdev: user-added entries are reported as OFFLOADED, while
 * dynamically learned local entries are reported as ADD_TO_BRIDGE.
 */
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	bridge = port->bridge;
	entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id, fdb_info->addr,
					       fdb_info->vid, fdb_info->added_by_user,
					       port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
					       br_offloads->esw, bridge);
	if (IS_ERR(entry))
		return;

	if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_OFFLOADED);
	else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER))
		/* NOTE(review): ADD_TO_BRIDGE presumably hands ownership of
		 * this learned address to the SW bridge so it does not age it
		 * out independently — confirm against switchdev docs.
		 */
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
1513
/* Remove the offloaded FDB entry described by @fdb_info from the given
 * vport's bridge, notifying switchdev of the deletion. Warns if the entry
 * is not found, since removal of an unknown key indicates state skew.
 */
void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	bridge = port->bridge;
	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (!entry) {
		esw_warn(esw->dev,
			 "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			 fdb_info->addr, fdb_info->vid, vport_num);
		return;
	}

	mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
1538
/* Periodic aging pass over all bridges: entries whose HW counter shows
 * recent traffic are refreshed; idle, dynamically learned, local entries
 * past the bridge's ageing_time are removed. User-added entries never age;
 * peer entries are aged by the owning eswitch, not here.
 */
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
	struct mlx5_esw_bridge *bridge;

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
			unsigned long lastuse =
				(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);

			if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
				continue;

			/* HW saw packets since our last snapshot. */
			if (time_after(lastuse, entry->lastuse))
				mlx5_esw_bridge_fdb_entry_refresh(entry);
			else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
				 time_is_before_jiffies(entry->lastuse + bridge->ageing_time))
				mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
		}
	}
}
1560
/* Tear down every remaining bridge port. Each vport cleanup drops a bridge
 * reference, so all bridges should be gone afterwards — warn otherwise.
 */
static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *port;
	unsigned long i;

	xa_for_each(&br_offloads->ports, i, port)
		mlx5_esw_bridge_vport_cleanup(br_offloads, port);

	WARN_ONCE(!list_empty(&br_offloads->bridges),
		  "Cleaning up bridge offloads while still having bridges attached\n");
}
1572
1573struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
1574{
1575 struct mlx5_esw_bridge_offloads *br_offloads;
1576
1577 ASSERT_RTNL();
1578
1579 br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
1580 if (!br_offloads)
1581 return ERR_PTR(-ENOMEM);
1582
1583 INIT_LIST_HEAD(&br_offloads->bridges);
1584 xa_init(&br_offloads->ports);
1585 br_offloads->esw = esw;
1586 esw->br_offloads = br_offloads;
1587
1588 return br_offloads;
1589}
1590
/* Destroy the per-eswitch bridge-offloads context: flush all ports/bridges,
 * verify the port xarray is empty, then free the context. Must be called
 * under RTNL. No-op if offloads were never initialized.
 */
void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;

	ASSERT_RTNL();

	if (!br_offloads)
		return;

	mlx5_esw_bridge_flush(br_offloads);
	WARN_ON(!xa_empty(&br_offloads->ports));

	esw->br_offloads = NULL;
	kvfree(br_offloads);
}
1606