#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "lib/mpfs.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);

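/* Match levels for L2 steering rules: exact DMAC match, catch-all
 * multicast, or promiscuous (empty match).
 */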
enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

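/* Hash table node tracking one MAC address synced from the netdev, its
 * pending action, and whether the address was added to the MPFS table.
 */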
struct mlx5e_l2_hash_node {
	struct hlist_node hlist;
	u8 action;
	struct mlx5e_l2_rule ai;
	bool mpfs;
};

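/* Hash MAC addresses on their least significant byte. */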
static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}

static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

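/* Push the list of active VLANs to the NIC vport context, truncating it
 * to the device limit (log_max_vlan_list) if needed.
 */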
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

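/* Add a VLAN steering rule forwarding matching traffic to the L2 table:
 * untagged, any C-tag, any S-tag, or an exact C-tag VID match.
 */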
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		if (priv->fs.vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
			priv->fs.vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}

static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->fs.vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->fs.vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}

static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_add_any_vid_rules(priv);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_del_any_vid_rules(priv);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

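/* Apply a node's pending ADD/DEL action: install or remove its L2 flow
 * rule and, for unicast addresses, keep the MPFS L2 table in sync.
 */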
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			    action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}

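/* Snapshot the netdev UC/MC address lists into the driver's hash tables
 * under the netdev address lock.
 */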
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

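/* Program the vport context UC/MC address list from the hash tables,
 * truncating to the device limit (log_max_current_{uc,mc}_list).
 */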
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}

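/* Work handler for ndo_set_rx_mode: compute the delta between the current
 * and requested promisc/allmulti/broadcast state, update the steering
 * rules accordingly, then re-sync addresses and the vport context.
 */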
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
			mlx5_del_flow_rules(ttc->rules[i]);
			ttc->rules[i] = NULL;
		}
	}

	for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}

struct mlx5e_etype_proto {
	u16 etype;
	u8 proto;
};

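/* Traffic type classification: map each traffic type (TT) to the
 * (ethertype, IP protocol) pair its TTC rule matches on. A zero field
 * means "don't match on it".
 */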
static struct mlx5e_etype_proto ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5E_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5E_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
	[MLX5E_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5E_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
};

static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}

static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype,
			u8 proto)
{
	int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}

	ipv = mlx5e_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

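/* Install one TTC rule per traffic type, steering each TT to its RSS TIR
 * (MLX5E_TT_ANY goes to the first direct TIR), plus tunnel rules that send
 * GRE traffic to the inner TTC table when the device supports it.
 */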
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ttc = &priv->fs.ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->indir_tir[tt].tirn;
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	rules = ttc->tunnel_rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.inner_ttc.ft.t;
	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

#define MLX5E_TTC_NUM_GROUPS 3
#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
#define MLX5E_TTC_GROUP2_SIZE BIT(1)
#define MLX5E_TTC_GROUP3_SIZE BIT(0)
#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
			      MLX5E_TTC_GROUP2_SIZE +\
			      MLX5E_TTC_GROUP3_SIZE)

#define MLX5E_INNER_TTC_NUM_GROUPS 3
#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
				    MLX5E_INNER_TTC_GROUP2_SIZE +\
				    MLX5E_INNER_TTC_GROUP3_SIZE)

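/* Shorthand for filling create_flow_group_in fields; the group-creation
 * helpers below use it heavily.
 */
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
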
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
					 bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

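/* Like mlx5e_generate_ttc_rule(), but matches on the inner (post-decap)
 * headers of tunneled packets.
 */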
static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_destination *dest,
			      u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5e_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle **rules;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ttc = &priv->fs.inner_ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->inner_indir_tir[tt].tirn;

		rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
	ft_attr.level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_inner_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_inner_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr.level = MLX5E_TTC_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}

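/* Build and install the L2 (DMAC) rule for @ai, steering matches to the
 * TTC table: exact DMAC for FULLMATCH, the multicast bit for ALLMULTI,
 * or an empty match for PROMISC.
 */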
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}

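/* L2 table layout: one promiscuous entry, 2^15 exact-DMAC entries, and
 * one all-multicast entry.
 */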
#define MLX5E_NUM_L2_GROUPS 3
#define MLX5E_L2_GROUP1_SIZE BIT(0)
#define MLX5E_L2_GROUP2_SIZE BIT(15)
#define MLX5E_L2_GROUP3_SIZE BIT(0)
#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
			     MLX5E_L2_GROUP2_SIZE +\
			     MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

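/* VLAN table layout: 4K exact C-tag VID entries, untagged + any-C-tag
 * entries, and one any-S-tag entry.
 */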
#define MLX5E_NUM_VLAN_GROUPS 3
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE BIT(0)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
			       MLX5E_VLAN_GROUP1_SIZE +\
			       MLX5E_VLAN_GROUP2_SIZE)

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

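/* Create the RX steering pipeline. Tables are created destination-first
 * (aRFS, inner TTC, TTC, L2, VLAN) since each level points to the next
 * one down; packets enter at the VLAN table.
 */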
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_inner_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	err = mlx5e_create_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv);
	mlx5e_destroy_inner_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}