/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);

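/* L2 rule types: exact DMAC match, all-multicast, or promiscuous */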
enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
	MLX5E_PROMISC   = 2,
};

enum {
	MLX5E_UC       = 0,
	MLX5E_MC_IPV4  = 1,
	MLX5E_MC_IPV6  = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_l2_hash_node {
	struct hlist_node hlist;
	u8                action;
	struct mlx5e_l2_rule ai;
};

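/* Hash an Ethernet address by its least significant byte */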
static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}

static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

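	/* May run in atomic context: mlx5e_sync_netdev_addr() calls this
	 * under netif_addr_lock_bh(), hence GFP_ATOMIC.
	 */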
	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

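/* Push the set of active VLANs into the NIC vport context; VLANs beyond
 * the device's max list size are dropped with a warning.
 */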
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	default:
		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		return -ENOMEM;
	}

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		if (priv->fs.vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
			priv->fs.vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}

static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_any_vid_rules(priv);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_any_vid_rules(priv);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->fs.vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->fs.vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}

static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID)
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_add_any_vid_rules(priv);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_del_any_vid_rules(priv);
}

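/* Iterate over every node of an L2 address hash table (removal-safe) */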
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}
}

static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc)
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type,
					     addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}

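/* Workqueue handler: reconcile promisc/allmulti/broadcast state and the
 * L2 DMAC steering rules with the current netdev flags and address lists.
 */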
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
			mlx5_del_flow_rules(ttc->rules[i]);
			ttc->rules[i] = NULL;
		}
	}
}

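/* Per traffic type, the (ethertype, IP protocol) pair its TTC rule matches;
 * a zero field is left unmatched.
 */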
static struct {
	u16 etype;
	u8 proto;
} ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5E_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5E_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype,
			u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}
	if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ttc = &priv->fs.ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->indir_tir[tt].tirn;
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

#define MLX5E_TTC_NUM_GROUPS	3
#define MLX5E_TTC_GROUP1_SIZE	BIT(3)
#define MLX5E_TTC_GROUP2_SIZE	BIT(1)
#define MLX5E_TTC_GROUP3_SIZE	BIT(0)
#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
				 MLX5E_TTC_GROUP2_SIZE +\
				 MLX5E_TTC_GROUP3_SIZE)
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

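	/* L4 Group: match on ethertype + IP protocol */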
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

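	/* L3 Group: match on ethertype only */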
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

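	/* Any Group: no match criteria, catches everything else */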
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr.level = MLX5E_TTC_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		return -ENOMEM;
	}

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}

#define MLX5E_NUM_L2_GROUPS	3
#define MLX5E_L2_GROUP1_SIZE	BIT(0)
#define MLX5E_L2_GROUP2_SIZE	BIT(15)
#define MLX5E_L2_GROUP3_SIZE	BIT(0)
#define MLX5E_L2_TABLE_SIZE	(MLX5E_L2_GROUP1_SIZE +\
				 MLX5E_L2_GROUP2_SIZE +\
				 MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);

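	/* Flow group for promiscuous (no match criteria) */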
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

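	/* Flow group for full DMAC match */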
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

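	/* Flow group for allmulti (match only the multicast bit of the DMAC) */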
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE)

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

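	/* Group 0: per-VID rules, match cvlan_tag + first_vid */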
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

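	/* Group 1: untagged / any-ctag rules, match cvlan_tag only */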
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

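	/* Group 2: any-stag rules, match svlan_tag only */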
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}