#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "lib/mpfs.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
	MLX5E_PROMISC   = 2,
};

enum {
	MLX5E_UC        = 0,
	MLX5E_MC_IPV4   = 1,
	MLX5E_MC_IPV6   = 2,
	MLX5E_MC_OTHER  = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_l2_hash_node {
	struct hlist_node hlist;
	u8                action;
	struct mlx5e_l2_rule ai;
	bool              mpfs;
};

static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}

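/* netdev_uc / netdev_mc are small hash tables keyed on the last byte of the
 * MAC address. Each node carries a pending action (add/del/none) which is
 * applied to the hardware flow tables on the next rx_mode work invocation.
 */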
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

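/* Mirror the set of active C-VLANs into the NIC vport context via
 * mlx5_modify_nic_vport_vlans(), presumably so the device can account for
 * the vport's VLAN membership. VLANs beyond the device limit are dropped
 * with a warning.
 */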
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};

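/* Install one rule in the VLAN flow table. Every VLAN rule forwards to the
 * L2 (MAC) table; the rule type selects the match: untagged traffic, any
 * C-tag, any S-tag, or a specific C-/S-VLAN VID.
 */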
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		rule_p = &priv->fs.vlan.active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		if (priv->fs.vlan.active_svlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
			priv->fs.vlan.active_svlans_rule[vid] = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->fs.vlan.active_cvlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
			priv->fs.vlan.active_cvlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}

static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

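/* Toggling the C-VLAN filter amounts to removing/adding the "any C-tag VID"
 * catch-all rule. In promiscuous mode that rule is owned by the rx_mode
 * flow, so it is left untouched here.
 */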
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
	int err;

	set_bit(vid, priv->fs.vlan.active_cvlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	if (err)
		clear_bit(vid, priv->fs.vlan.active_cvlans);

	return err;
}

static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
{
	struct net_device *netdev = priv->netdev;
	int err;

	set_bit(vid, priv->fs.vlan.active_svlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
	if (err) {
		clear_bit(vid, priv->fs.vlan.active_svlans);
		return err;
	}

	/* Re-evaluate netdev features now that an S-tag VID is in use */
	netdev_update_features(netdev);
	return err;
}

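/* VLAN add/kill entry points used for rx-vlan-filter offload; dispatch on
 * the VLAN protocol (802.1Q vs 802.1ad).
 */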
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (be16_to_cpu(proto) == ETH_P_8021Q)
		return mlx5e_vlan_rx_add_cvid(priv, vid);
	else if (be16_to_cpu(proto) == ETH_P_8021AD)
		return mlx5e_vlan_rx_add_svid(priv, vid);

	return -EOPNOTSUPP;
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (be16_to_cpu(proto) == ETH_P_8021Q) {
		clear_bit(vid, priv->fs.vlan.active_cvlans);
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
		clear_bit(vid, priv->fs.vlan.active_svlans);
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
		netdev_update_features(dev);
	}

	return 0;
}

static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	if (priv->fs.vlan.cvlan_filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_add_any_vid_rules(priv);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	if (priv->fs.vlan.cvlan_filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_del_any_vid_rules(priv);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

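/* Apply the pending action recorded on a hash node: install/remove the
 * matching L2 steering rule and, for unicast addresses, add/remove the MAC
 * in the MPFS (multi physical function switch) table.
 */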
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			    action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}

static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc)
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}

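/* rx_mode work: recompute the desired promisc/allmulti/broadcast state from
 * the netdev flags, apply the delta as L2 rules, replay pending UC/MC
 * address actions and finally sync the NIC vport context.
 */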
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		if (!priv->channels.params.vlan_strip_disable)
			netdev_warn_once(ndev,
					 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.cvlan_filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.cvlan_filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
			mlx5_del_flow_rules(ttc->rules[i]);
			ttc->rules[i] = NULL;
		}
	}

	for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}

struct mlx5e_etype_proto {
	u16 etype;
	u8 proto;
};

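/* Traffic type classification (TTC): map each traffic type to the ethertype
 * and IP protocol it matches. The tunnel rules steer matching (GRE) traffic
 * to the inner TTC table for classification on the inner headers.
 */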
static struct mlx5e_etype_proto ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5E_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5E_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
	[MLX5E_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5E_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
};

static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}

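/* Build a single TTC rule. When the device can match on the outer
 * ip_version field that is used, otherwise the rule falls back to matching
 * the ethertype.
 */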
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype,
			u8 proto)
{
	int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}

	ipv = mlx5e_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
					  struct ttc_params *params,
					  struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	rules = ttc->tunnel_rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = params->inner_ttc->ft.t;
	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

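/* TTC flow table layout: group 1 holds the rules that match on both L3 and
 * L4 (plus the tunnel rules), group 2 the L3-only rules (IPv4/IPv6 any),
 * group 3 the single catch-all rule.
 */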
#define MLX5E_TTC_NUM_GROUPS	3
#define MLX5E_TTC_GROUP1_SIZE	(BIT(3) + MLX5E_NUM_TUNNEL_TT)
#define MLX5E_TTC_GROUP2_SIZE	 BIT(1)
#define MLX5E_TTC_GROUP3_SIZE	 BIT(0)
#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
				 MLX5E_TTC_GROUP2_SIZE +\
				 MLX5E_TTC_GROUP3_SIZE)

#define MLX5E_INNER_TTC_NUM_GROUPS	3
#define MLX5E_INNER_TTC_GROUP1_SIZE	BIT(3)
#define MLX5E_INNER_TTC_GROUP2_SIZE	BIT(1)
#define MLX5E_INNER_TTC_GROUP3_SIZE	BIT(0)
#define MLX5E_INNER_TTC_TABLE_SIZE	(MLX5E_INNER_TTC_GROUP1_SIZE +\
					 MLX5E_INNER_TTC_GROUP2_SIZE +\
					 MLX5E_INNER_TTC_GROUP3_SIZE)

static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
					 bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_destination *dest,
			      u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5e_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
						struct ttc_params *params,
						struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];

		rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
				struct ttc_params *ttc_params)
{
	ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
	ttc_params->inner_ttc = &priv->fs.inner_ttc;
}

void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;

	ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;
}

void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;
}

int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
				 struct mlx5e_ttc_table *ttc)
{
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_inner_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
				   struct mlx5e_ttc_table *ttc)
{
	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
			     struct mlx5e_ttc_table *ttc)
{
	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
			   struct mlx5e_ttc_table *ttc)
{
	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}

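/* Install an L2 (DMAC) rule that forwards to the TTC table. FULLMATCH
 * matches the exact address in @ai, ALLMULTI matches the multicast bit in
 * the destination MAC, PROMISC matches everything.
 */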
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}

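/* L2 table layout: one catch-all entry for promiscuous mode, a large group
 * for exact DMAC matches and a single entry matching the multicast bit.
 */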
#define MLX5E_NUM_L2_GROUPS	3
#define MLX5E_L2_GROUP1_SIZE	BIT(0)
#define MLX5E_L2_GROUP2_SIZE	BIT(15)
#define MLX5E_L2_GROUP3_SIZE	BIT(0)
#define MLX5E_L2_TABLE_SIZE	(MLX5E_L2_GROUP1_SIZE +\
				 MLX5E_L2_GROUP2_SIZE +\
				 MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

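/* VLAN table layout: per-VID C-tag and S-tag groups (4K entries each), a
 * two-entry group for the untagged and "any C-tag" rules, and one entry for
 * the "any S-tag" rule.
 */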
#define MLX5E_NUM_VLAN_GROUPS	4
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 MLX5E_VLAN_GROUP3_SIZE)

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

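/* Build the NIC RX steering pipeline. Tables are created from the bottom up
 * (aRFS, inner TTC, TTC, L2, VLAN) since each level points at the next one;
 * packets enter at the VLAN table. Failure to create the aRFS tables only
 * clears NETIF_F_NTUPLE instead of failing the whole setup.
 */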
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	mlx5e_set_ttc_basic_params(priv, &ttc_params);
	mlx5e_set_inner_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}