#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_mpls.h>
#include <net/psample.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en.h"
#include "en/tc/post_act.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag.h"
#include "lag_mp.h"

#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)

struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules the restore metadata is written directly into reg_b,
	 * which is passed up to software, since NIC rules do not jump between
	 * steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};

/* The tc hash table gets its own lock class so that lockdep does not
 * conflate its mutex with other rhashtable mutexes that may be taken
 * while flows are being deleted.
 */
static struct lock_class_key tc_ht_lock_key;

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);

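/* Write a match on one of the metadata registers (reg_c) into @spec.
 * The mapping table above gives the byte offset of the register inside
 * misc_parameters_2 and the bit offset/width inside that register; the
 * existing 32-bit value in the spec is read back, the mapped bit range is
 * cleared and then ORed with @val/@mask, so several mappings can share
 * one register.
 */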
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 val,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	/* shift to the bit offset of this mapping inside the register */
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	val <<= moffset;
	max_mask <<= moffset;

	/* clear the bit range this mapping owns */
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	/* merge in the new value and mask */
	curr_mask |= mask;
	curr_val |= val;

	/* convert back to big endian and write out */
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}

void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}

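/* Append a SET modify-header action that writes @data into the register
 * described by @type and return the index of that action inside
 * @mod_hdr_acts, so callers can later rewrite the same action with
 * mlx5e_tc_match_to_reg_mod_hdr_change().  A sketch of the typical
 * pairing (identifiers as used in this file):
 *
 *	act_id = mlx5e_tc_match_to_reg_set_and_get_id(mdev, acts,
 *						      MLX5_FLOW_NAMESPACE_FDB,
 *						      CHAIN_TO_REG, chain);
 *	...
 *	mlx5e_tc_match_to_reg_mod_hdr_change(mdev, acts, CHAIN_TO_REG,
 *					     act_id, new_chain);
 */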
int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
	if (err)
		return err;

	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* a modify-header length of 0 means the full 32-bit register */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
238
239struct mlx5e_tc_int_port_priv *
240mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
241{
242 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
243 struct mlx5_rep_uplink_priv *uplink_priv;
244 struct mlx5e_rep_priv *uplink_rpriv;
245
246 if (is_mdev_switchdev_mode(priv->mdev)) {
247 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
248 uplink_priv = &uplink_rpriv->uplink_priv;
249
250 return uplink_priv->int_port_priv;
251 }
252
253 return NULL;
254}
255
256static struct mlx5_tc_ct_priv *
257get_ct_priv(struct mlx5e_priv *priv)
258{
259 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
260 struct mlx5_rep_uplink_priv *uplink_priv;
261 struct mlx5e_rep_priv *uplink_rpriv;
262
263 if (is_mdev_switchdev_mode(priv->mdev)) {
264 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
265 uplink_priv = &uplink_rpriv->uplink_priv;
266
267 return uplink_priv->ct_priv;
268 }
269
270 return priv->fs.tc.ct;
271}
272
273#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
274static struct mlx5e_tc_psample *
275get_sample_priv(struct mlx5e_priv *priv)
276{
277 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
278 struct mlx5_rep_uplink_priv *uplink_priv;
279 struct mlx5e_rep_priv *uplink_rpriv;
280
281 if (is_mdev_switchdev_mode(priv->mdev)) {
282 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
283 uplink_priv = &uplink_rpriv->uplink_priv;
284
285 return uplink_priv->tc_psample;
286 }
287
288 return NULL;
289}
290#endif
291
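/* mlx5_tc_rule_insert() / mlx5_tc_rule_delete() pick the steering domain
 * for a rule: in switchdev mode the rule goes to the eswitch FDB,
 * otherwise it is added to the NIC RX tc tables.
 */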
292struct mlx5_flow_handle *
293mlx5_tc_rule_insert(struct mlx5e_priv *priv,
294 struct mlx5_flow_spec *spec,
295 struct mlx5_flow_attr *attr)
296{
297 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
298
299 if (is_mdev_switchdev_mode(priv->mdev))
300 return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
301
302 return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
303}
304
305void
306mlx5_tc_rule_delete(struct mlx5e_priv *priv,
307 struct mlx5_flow_handle *rule,
308 struct mlx5_flow_attr *attr)
309{
310 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
311
312 if (is_mdev_switchdev_mode(priv->mdev)) {
313 mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
314
315 return;
316 }
317
318 mlx5e_del_offloaded_nic_rule(priv, rule, attr);
319}
320
321int
322mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
323 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
324 enum mlx5_flow_namespace_type ns,
325 enum mlx5e_tc_attr_to_reg type,
326 u32 data)
327{
328 int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);
329
330 return ret < 0 ? ret : 0;
331}
332
333void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
334 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
335 enum mlx5e_tc_attr_to_reg type,
336 int act_id, u32 data)
337{
338 int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
339 int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
340 int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
341 char *modact;
342
343 modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);
344
345
346 if (mlen == 32)
347 mlen = 0;
348
349 MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
350 MLX5_SET(set_action_in, modact, field, mfield);
351 MLX5_SET(set_action_in, modact, offset, moffset);
352 MLX5_SET(set_action_in, modact, length, mlen);
353 MLX5_SET(set_action_in, modact, data, data);
354}
355
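/* Hairpin offload: packets matching a hairpin rule are forwarded by the
 * NIC from the RQs of one function straight to the SQs of a peer function
 * on the same device, without going through software.  The structs below
 * hold the pair of queues plus the TIR/RQT/TTC objects used to spread
 * hairpinned traffic with RSS when more than one channel is used.
 */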
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table of hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hairpin entries that were not fully initialized when a dead peer
	 * update event walked the table
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};
389
390static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
391 struct mlx5e_tc_flow *flow);
392
393struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
394{
395 if (!flow || !refcount_inc_not_zero(&flow->refcnt))
396 return ERR_PTR(-EINVAL);
397 return flow;
398}
399
400void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
401{
402 if (refcount_dec_and_test(&flow->refcnt)) {
403 mlx5e_tc_del_flow(priv, flow);
404 kfree_rcu(flow, rcu_head);
405 }
406}
407
408bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
409{
410 return flow_flag_test(flow, ESWITCH);
411}
412
413static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
414{
415 return flow_flag_test(flow, FT);
416}
417
418bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
419{
420 return flow_flag_test(flow, OFFLOADED);
421}
422
423static int get_flow_name_space(struct mlx5e_tc_flow *flow)
424{
425 return mlx5e_is_eswitch_flow(flow) ?
426 MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
427}
428
429static struct mod_hdr_tbl *
430get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
431{
432 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
433
434 return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
435 &esw->offloads.mod_hdr :
436 &priv->fs.tc.mod_hdr;
437}
438
439static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
440 struct mlx5e_tc_flow *flow,
441 struct mlx5e_tc_flow_parse_attr *parse_attr)
442{
443 struct mlx5_modify_hdr *modify_hdr;
444 struct mlx5e_mod_hdr_handle *mh;
445
446 mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
447 get_flow_name_space(flow),
448 &parse_attr->mod_hdr_acts);
449 if (IS_ERR(mh))
450 return PTR_ERR(mh);
451
452 modify_hdr = mlx5e_mod_hdr_get(mh);
453 flow->attr->modify_hdr = modify_hdr;
454 flow->mh = mh;
455
456 return 0;
457}
458
459static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
460 struct mlx5e_tc_flow *flow)
461{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;
465
466 mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
467 flow->mh);
468 flow->mh = NULL;
469}
470
471static
472struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
473{
474 struct mlx5_core_dev *mdev;
475 struct net_device *netdev;
476 struct mlx5e_priv *priv;
477
478 netdev = dev_get_by_index(net, ifindex);
479 if (!netdev)
480 return ERR_PTR(-ENODEV);
481
482 priv = netdev_priv(netdev);
483 mdev = priv->mdev;
	dev_put(netdev);

	/* The mirred tc action keeps a reference on the ifindex net_device
	 * for the lifetime of the filter, and any mdev returned here is only
	 * used while that filter (or a hairpin object created from it)
	 * exists, so it is safe to drop our own netdev reference before
	 * returning.
	 */
	return mdev;
500}
501
502static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
503{
504 struct mlx5e_tir_builder *builder;
505 int err;
506
507 builder = mlx5e_tir_builder_alloc(false);
508 if (!builder)
509 return -ENOMEM;
510
511 err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
512 if (err)
513 goto out;
514
515 mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
516 err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
517 if (err)
518 goto create_tir_err;
519
520out:
521 mlx5e_tir_builder_free(builder);
522 return err;
523
524create_tir_err:
525 mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
526
527 goto out;
528}
529
530static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
531{
532 mlx5e_tir_destroy(&hp->direct_tir);
533 mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
534}
535
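/* Build an indirection RQT that spreads hairpin traffic uniformly across
 * the hairpin RQs, using the hash function currently configured for the
 * regular RX path.
 */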
536static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
537{
538 struct mlx5e_priv *priv = hp->func_priv;
539 struct mlx5_core_dev *mdev = priv->mdev;
540 struct mlx5e_rss_params_indir *indir;
541 int err;
542
543 indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
544 if (!indir)
545 return -ENOMEM;
546
547 mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
548 err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
549 mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
550 indir);
551
552 kvfree(indir);
553 return err;
554}
555
556static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
557{
558 struct mlx5e_priv *priv = hp->func_priv;
559 struct mlx5e_rss_params_hash rss_hash;
560 enum mlx5_traffic_types tt, max_tt;
561 struct mlx5e_tir_builder *builder;
562 int err = 0;
563
564 builder = mlx5e_tir_builder_alloc(false);
565 if (!builder)
566 return -ENOMEM;
567
568 rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);
569
570 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
571 struct mlx5e_rss_params_traffic_type rss_tt;
572
573 rss_tt = mlx5e_rss_get_default_tt_config(tt);
574
575 mlx5e_tir_builder_build_rqt(builder, hp->tdn,
576 mlx5e_rqt_get_rqtn(&hp->indir_rqt),
577 false);
578 mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);
579
580 err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
581 if (err) {
582 mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
583 goto err_destroy_tirs;
584 }
585
586 mlx5e_tir_builder_clear(builder);
587 }
588
589out:
590 mlx5e_tir_builder_free(builder);
591 return err;
592
593err_destroy_tirs:
594 max_tt = tt;
595 for (tt = 0; tt < max_tt; tt++)
596 mlx5e_tir_destroy(&hp->indir_tir[tt]);
597
598 goto out;
599}
600
601static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
602{
603 int tt;
604
605 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
606 mlx5e_tir_destroy(&hp->indir_tir[tt]);
607}
608
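/* Point every traffic type in the hairpin TTC table at the matching RSS
 * (indirect) TIR, except MLX5_TT_ANY which goes to the direct TIR.
 */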
609static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
610 struct ttc_params *ttc_params)
611{
612 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
613 int tt;
614
615 memset(ttc_params, 0, sizeof(*ttc_params));
616
617 ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
618 MLX5_FLOW_NAMESPACE_KERNEL);
619 for (tt = 0; tt < MLX5_NUM_TT; tt++) {
620 ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
621 ttc_params->dests[tt].tir_num =
622 tt == MLX5_TT_ANY ?
623 mlx5e_tir_get_tirn(&hp->direct_tir) :
624 mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
625 }
626
627 ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
628 ft_attr->prio = MLX5E_TC_PRIO;
629}
630
631static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
632{
633 struct mlx5e_priv *priv = hp->func_priv;
634 struct ttc_params ttc_params;
635 int err;
636
637 err = mlx5e_hairpin_create_indirect_rqt(hp);
638 if (err)
639 return err;
640
641 err = mlx5e_hairpin_create_indirect_tirs(hp);
642 if (err)
643 goto err_create_indirect_tirs;
644
645 mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
646 hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
647 if (IS_ERR(hp->ttc)) {
648 err = PTR_ERR(hp->ttc);
649 goto err_create_ttc_table;
650 }
651
652 netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
653 hp->num_channels,
654 mlx5_get_ttc_flow_table(priv->fs.ttc)->id);
655
656 return 0;
657
658err_create_ttc_table:
659 mlx5e_hairpin_destroy_indirect_tirs(hp);
660err_create_indirect_tirs:
661 mlx5e_rqt_destroy(&hp->indir_rqt);
662
663 return err;
664}
665
666static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
667{
668 mlx5_destroy_ttc_table(hp->ttc);
669 mlx5e_hairpin_destroy_indirect_tirs(hp);
670 mlx5e_rqt_destroy(&hp->indir_rqt);
671}
672
673static struct mlx5e_hairpin *
674mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
675 int peer_ifindex)
676{
677 struct mlx5_core_dev *func_mdev, *peer_mdev;
678 struct mlx5e_hairpin *hp;
679 struct mlx5_hairpin *pair;
680 int err;
681
682 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
683 if (!hp)
684 return ERR_PTR(-ENOMEM);
685
686 func_mdev = priv->mdev;
687 peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
688 if (IS_ERR(peer_mdev)) {
689 err = PTR_ERR(peer_mdev);
690 goto create_pair_err;
691 }
692
693 pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
694 if (IS_ERR(pair)) {
695 err = PTR_ERR(pair);
696 goto create_pair_err;
697 }
698 hp->pair = pair;
699 hp->func_mdev = func_mdev;
700 hp->func_priv = priv;
701 hp->num_channels = params->num_channels;
702
703 err = mlx5e_hairpin_create_transport(hp);
704 if (err)
705 goto create_transport_err;
706
707 if (hp->num_channels > 1) {
708 err = mlx5e_hairpin_rss_init(hp);
709 if (err)
710 goto rss_init_err;
711 }
712
713 return hp;
714
715rss_init_err:
716 mlx5e_hairpin_destroy_transport(hp);
717create_transport_err:
718 mlx5_core_hairpin_destroy(hp->pair);
719create_pair_err:
720 kfree(hp);
721 return ERR_PTR(err);
722}
723
724static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
725{
726 if (hp->num_channels > 1)
727 mlx5e_hairpin_rss_cleanup(hp);
728 mlx5e_hairpin_destroy_transport(hp);
729 mlx5_core_hairpin_destroy(hp->pair);
730 kvfree(hp);
731}
732
733static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
734{
735 return (peer_vhca_id << 16 | prio);
736}
737
738static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
739 u16 peer_vhca_id, u8 prio)
740{
741 struct mlx5e_hairpin_entry *hpe;
742 u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
743
744 hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
745 hairpin_hlist, hash_key) {
746 if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
747 refcount_inc(&hpe->refcnt);
748 return hpe;
749 }
750 }
751
752 return NULL;
753}
754
755static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
756 struct mlx5e_hairpin_entry *hpe)
757{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
761 hash_del(&hpe->hairpin_hlist);
762 mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
763
764 if (!IS_ERR_OR_NULL(hpe->hp)) {
765 netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
766 dev_name(hpe->hp->pair->peer_mdev->device));
767
768 mlx5e_hairpin_destroy(hpe->hp);
769 }
770
771 WARN_ON(!list_empty(&hpe->flows));
772 kfree(hpe);
773}
774
775#define UNKNOWN_MATCH_PRIO 8
776
777static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
778 struct mlx5_flow_spec *spec, u8 *match_prio,
779 struct netlink_ext_ack *extack)
780{
781 void *headers_c, *headers_v;
782 u8 prio_val, prio_mask = 0;
783 bool vlan_present;
784
785#ifdef CONFIG_MLX5_CORE_EN_DCB
786 if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
787 NL_SET_ERR_MSG_MOD(extack,
788 "only PCP trust state supported for hairpin");
789 return -EOPNOTSUPP;
790 }
791#endif
792 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
793 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
794
795 vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
796 if (vlan_present) {
797 prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
798 prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
799 }
800
801 if (!vlan_present || !prio_mask) {
802 prio_val = UNKNOWN_MATCH_PRIO;
803 } else if (prio_mask != 0x7) {
804 NL_SET_ERR_MSG_MOD(extack,
805 "masked priority match not supported for hairpin");
806 return -EOPNOTSUPP;
807 }
808
809 *match_prio = prio_val;
810 return 0;
811}
812
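/* Attach a flow to a hairpin pair keyed by (peer vhca_id, matched vlan
 * prio).  An existing pair is reused when possible; otherwise a new one is
 * created, sized roughly as one channel per 50Gbps of port speed, and RSS
 * objects are set up when more than one channel is needed.
 */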
813static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
814 struct mlx5e_tc_flow *flow,
815 struct mlx5e_tc_flow_parse_attr *parse_attr,
816 struct netlink_ext_ack *extack)
817{
818 int peer_ifindex = parse_attr->mirred_ifindex[0];
819 struct mlx5_hairpin_params params;
820 struct mlx5_core_dev *peer_mdev;
821 struct mlx5e_hairpin_entry *hpe;
822 struct mlx5e_hairpin *hp;
823 u64 link_speed64;
824 u32 link_speed;
825 u8 match_prio;
826 u16 peer_id;
827 int err;
828
829 peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
830 if (IS_ERR(peer_mdev)) {
831 NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
832 return PTR_ERR(peer_mdev);
833 }
834
835 if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
836 NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
837 return -EOPNOTSUPP;
838 }
839
840 peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
841 err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
842 extack);
843 if (err)
844 return err;
845
846 mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
847 hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
848 if (hpe) {
849 mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
850 wait_for_completion(&hpe->res_ready);
851
852 if (IS_ERR(hpe->hp)) {
853 err = -EREMOTEIO;
854 goto out_err;
855 }
856 goto attach_flow;
857 }
858
859 hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
860 if (!hpe) {
861 mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
862 return -ENOMEM;
863 }
864
865 spin_lock_init(&hpe->flows_lock);
866 INIT_LIST_HEAD(&hpe->flows);
867 INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
868 hpe->peer_vhca_id = peer_id;
869 hpe->prio = match_prio;
870 refcount_set(&hpe->refcnt, 1);
871 init_completion(&hpe->res_ready);
872
873 hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
874 hash_hairpin_info(peer_id, match_prio));
875 mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
876
877 params.log_data_size = 16;
878 params.log_data_size = min_t(u8, params.log_data_size,
879 MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
880 params.log_data_size = max_t(u8, params.log_data_size,
881 MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
882
883 params.log_num_packets = params.log_data_size -
884 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
885 params.log_num_packets = min_t(u8, params.log_num_packets,
886 MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
887
888 params.q_counter = priv->q_counter;
889
890 mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
891 link_speed = max_t(u32, link_speed, 50000);
892 link_speed64 = link_speed;
893 do_div(link_speed64, 50000);
894 params.num_channels = link_speed64;
895
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
897 hpe->hp = hp;
898 complete_all(&hpe->res_ready);
899 if (IS_ERR(hp)) {
900 err = PTR_ERR(hp);
901 goto out_err;
902 }
903
904 netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
905 mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
906 dev_name(hp->pair->peer_mdev->device),
907 hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
908
909attach_flow:
910 if (hpe->hp->num_channels > 1) {
911 flow_flag_set(flow, HAIRPIN_RSS);
912 flow->attr->nic_attr->hairpin_ft =
913 mlx5_get_ttc_flow_table(hpe->hp->ttc);
914 } else {
915 flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
916 }
917
918 flow->hpe = hpe;
919 spin_lock(&hpe->flows_lock);
920 list_add(&flow->hairpin, &hpe->flows);
921 spin_unlock(&hpe->flows_lock);
922
923 return 0;
924
925out_err:
926 mlx5e_hairpin_put(priv, hpe);
927 return err;
928}
929
930static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
931 struct mlx5e_tc_flow *flow)
932{
933
934 if (!flow->hpe)
935 return;
936
937 spin_lock(&flow->hpe->flows_lock);
938 list_del(&flow->hairpin);
939 spin_unlock(&flow->hpe->flows_lock);
940
941 mlx5e_hairpin_put(priv, flow->hpe);
942 flow->hpe = NULL;
943}
944
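/* Add a NIC-mode offloaded rule.  Destination selection, in priority
 * order: an explicit destination table, the hairpin TTC table (RSS) or
 * hairpin TIR, and finally the destination chain's table or the vlan
 * table for plain forward actions.  A flow counter destination is
 * appended when counting is requested, and the rule is installed in the
 * chain/prio table (creating the tc root table on first use).
 */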
945struct mlx5_flow_handle *
946mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
947 struct mlx5_flow_spec *spec,
948 struct mlx5_flow_attr *attr)
949{
950 struct mlx5_flow_context *flow_context = &spec->flow_context;
951 struct mlx5_fs_chains *nic_chains = nic_chains(priv);
952 struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
953 struct mlx5e_tc_table *tc = &priv->fs.tc;
954 struct mlx5_flow_destination dest[2] = {};
955 struct mlx5_flow_act flow_act = {
956 .action = attr->action,
957 .flags = FLOW_ACT_NO_APPEND,
958 };
959 struct mlx5_flow_handle *rule;
960 struct mlx5_flow_table *ft;
961 int dest_ix = 0;
962
963 flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
964 flow_context->flow_tag = nic_attr->flow_tag;
965
966 if (attr->dest_ft) {
967 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
968 dest[dest_ix].ft = attr->dest_ft;
969 dest_ix++;
970 } else if (nic_attr->hairpin_ft) {
971 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
972 dest[dest_ix].ft = nic_attr->hairpin_ft;
973 dest_ix++;
974 } else if (nic_attr->hairpin_tirn) {
975 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
976 dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
977 dest_ix++;
978 } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
979 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
980 if (attr->dest_chain) {
981 dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
982 attr->dest_chain, 1,
983 MLX5E_TC_FT_LEVEL);
984 if (IS_ERR(dest[dest_ix].ft))
985 return ERR_CAST(dest[dest_ix].ft);
986 } else {
987 dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
988 }
989 dest_ix++;
990 }
991
992 if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
993 MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
994 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
995
996 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
997 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
998 dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
999 dest_ix++;
1000 }
1001
1002 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1003 flow_act.modify_hdr = attr->modify_hdr;
1004
1005 mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* create the tc root table on first use */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);
1010
1011 if (IS_ERR(tc->t)) {
1012 mutex_unlock(&tc->t_lock);
1013 netdev_err(priv->netdev,
1014 "Failed to create tc offload table\n");
1015 rule = ERR_CAST(priv->fs.tc.t);
1016 goto err_ft_get;
1017 }
1018 }
1019 mutex_unlock(&tc->t_lock);
1020
1021 if (attr->chain || attr->prio)
1022 ft = mlx5_chains_get_table(nic_chains,
1023 attr->chain, attr->prio,
1024 MLX5E_TC_FT_LEVEL);
1025 else
1026 ft = attr->ft;
1027
1028 if (IS_ERR(ft)) {
1029 rule = ERR_CAST(ft);
1030 goto err_ft_get;
1031 }
1032
1033 if (attr->outer_match_level != MLX5_MATCH_NONE)
1034 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1035
1036 rule = mlx5_add_flow_rules(ft, spec,
1037 &flow_act, dest, dest_ix);
1038 if (IS_ERR(rule))
1039 goto err_rule;
1040
1041 return rule;
1042
1043err_rule:
1044 if (attr->chain || attr->prio)
1045 mlx5_chains_put_table(nic_chains,
1046 attr->chain, attr->prio,
1047 MLX5E_TC_FT_LEVEL);
1048err_ft_get:
1049 if (attr->dest_chain)
1050 mlx5_chains_put_table(nic_chains,
1051 attr->dest_chain, 1,
1052 MLX5E_TC_FT_LEVEL);
1053
1054 return ERR_CAST(rule);
1055}
1056
1057static int
1058mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
1059 struct mlx5e_tc_flow_parse_attr *parse_attr,
1060 struct mlx5e_tc_flow *flow,
1061 struct netlink_ext_ack *extack)
1062{
1063 struct mlx5_flow_attr *attr = flow->attr;
1064 struct mlx5_core_dev *dev = priv->mdev;
1065 struct mlx5_fc *counter = NULL;
1066 int err;
1067
1068 if (flow_flag_test(flow, HAIRPIN)) {
1069 err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
1070 if (err)
1071 return err;
1072 }
1073
1074 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1075 counter = mlx5_fc_create(dev, true);
1076 if (IS_ERR(counter))
1077 return PTR_ERR(counter);
1078
1079 attr->counter = counter;
1080 }
1081
1082 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
1083 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
1084 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
1085 if (err)
1086 return err;
1087 }
1088
1089 if (flow_flag_test(flow, CT))
1090 flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
1091 attr, &parse_attr->mod_hdr_acts);
1092 else
1093 flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
1094 attr);
1095
1096 return PTR_ERR_OR_ZERO(flow->rule[0]);
1097}
1098
1099void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
1100 struct mlx5_flow_handle *rule,
1101 struct mlx5_flow_attr *attr)
1102{
1103 struct mlx5_fs_chains *nic_chains = nic_chains(priv);
1104
1105 mlx5_del_flow_rules(rule);
1106
1107 if (attr->chain || attr->prio)
1108 mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
1109 MLX5E_TC_FT_LEVEL);
1110
1111 if (attr->dest_chain)
1112 mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
1113 MLX5E_TC_FT_LEVEL);
1114}
1115
1116static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
1117 struct mlx5e_tc_flow *flow)
1118{
1119 struct mlx5_flow_attr *attr = flow->attr;
1120 struct mlx5e_tc_table *tc = &priv->fs.tc;
1121
1122 flow_flag_clear(flow, OFFLOADED);
1123
1124 if (flow_flag_test(flow, CT))
1125 mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
1126 else if (!IS_ERR_OR_NULL(flow->rule[0]))
1127 mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
1128
	/* Release the tc root table when the last NIC offloaded flow is
	 * removed, to avoid an extra steering hop.
	 */
	mutex_lock(&priv->fs.tc.t_lock);
1133 if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
1134 !IS_ERR_OR_NULL(tc->t)) {
1135 mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
1136 priv->fs.tc.t = NULL;
1137 }
1138 mutex_unlock(&priv->fs.tc.t_lock);
1139
1140 kvfree(attr->parse_attr);
1141
1142 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1143 mlx5e_detach_mod_hdr(priv, flow);
1144
1145 mlx5_fc_destroy(priv->mdev, attr->counter);
1146
1147 if (flow_flag_test(flow, HAIRPIN))
1148 mlx5e_hairpin_flow_del(priv, flow);
1149
1150 kfree(flow->attr);
1151}
1152
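/* Install an FDB (eswitch) rule for a tc flow.  Slow-path flows bypass CT
 * and sampling and go straight to the eswitch; otherwise the rule is
 * handed to the CT or sample infrastructure when the flow uses them.  When
 * the flow's actions are split across two tables (split_count), an extra
 * forward rule is added and kept in flow->rule[1].
 */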
1153struct mlx5_flow_handle *
1154mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
1155 struct mlx5e_tc_flow *flow,
1156 struct mlx5_flow_spec *spec,
1157 struct mlx5_flow_attr *attr)
1158{
1159 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
1160 struct mlx5_flow_handle *rule;
1161
1162 if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
1163 return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
1164
1165 if (flow_flag_test(flow, CT)) {
1166 mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
1167
1168 rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
1169 flow, spec, attr,
1170 mod_hdr_acts);
1171#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
1172 } else if (flow_flag_test(flow, SAMPLE)) {
1173 rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
1174 mlx5e_tc_get_flow_tun_id(flow));
1175#endif
1176 } else {
1177 rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
1178 }
1179
1180 if (IS_ERR(rule))
1181 return rule;
1182
1183 if (attr->esw_attr->split_count) {
1184 flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
1185 if (IS_ERR(flow->rule[1])) {
1186 if (flow_flag_test(flow, CT))
1187 mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
1188 else
1189 mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
1190 return flow->rule[1];
1191 }
1192 }
1193
1194 return rule;
1195}
1196
1197void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
1198 struct mlx5e_tc_flow *flow,
1199 struct mlx5_flow_attr *attr)
1200{
1201 flow_flag_clear(flow, OFFLOADED);
1202
1203 if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
1204 goto offload_rule_0;
1205
1206 if (attr->esw_attr->split_count)
1207 mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
1208
1209 if (flow_flag_test(flow, CT))
1210 mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
1211#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
1212 else if (flow_flag_test(flow, SAMPLE))
1213 mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
1214#endif
1215 else
1216offload_rule_0:
1217 mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
1218}
1219
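/* Offload a flow to the slow path: the original attributes are cloned,
 * the action list is reduced to a plain forward and the SLOW_PATH flag is
 * set, so packets hit software until e.g. the encap neighbour becomes
 * valid and the flow can be moved to the fast path.
 */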
1220struct mlx5_flow_handle *
1221mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
1222 struct mlx5e_tc_flow *flow,
1223 struct mlx5_flow_spec *spec)
1224{
1225 struct mlx5_flow_attr *slow_attr;
1226 struct mlx5_flow_handle *rule;
1227
1228 slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
1229 if (!slow_attr)
1230 return ERR_PTR(-ENOMEM);
1231
1232 memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
1233 slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1234 slow_attr->esw_attr->split_count = 0;
1235 slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
1236
1237 rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
1238 if (!IS_ERR(rule))
1239 flow_flag_set(flow, SLOW);
1240
1241 kfree(slow_attr);
1242
1243 return rule;
1244}
1245
1246void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
1247 struct mlx5e_tc_flow *flow)
1248{
1249 struct mlx5_flow_attr *slow_attr;
1250
1251 slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
1252 if (!slow_attr) {
1253 mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
1254 return;
1255 }
1256
1257 memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
1258 slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1259 slow_attr->esw_attr->split_count = 0;
1260 slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
1261 mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
1262 flow_flag_clear(flow, SLOW);
1263 kfree(slow_attr);
1264}
1265
/* Caller must hold uplink_priv->unready_flows_lock before calling this
 * function.
 */
1269static void unready_flow_add(struct mlx5e_tc_flow *flow,
1270 struct list_head *unready_flows)
1271{
1272 flow_flag_set(flow, NOT_READY);
1273 list_add_tail(&flow->unready, unready_flows);
1274}
1275
/* Caller must hold uplink_priv->unready_flows_lock before calling this
 * function.
 */
1279static void unready_flow_del(struct mlx5e_tc_flow *flow)
1280{
1281 list_del(&flow->unready);
1282 flow_flag_clear(flow, NOT_READY);
1283}
1284
1285static void add_unready_flow(struct mlx5e_tc_flow *flow)
1286{
1287 struct mlx5_rep_uplink_priv *uplink_priv;
1288 struct mlx5e_rep_priv *rpriv;
1289 struct mlx5_eswitch *esw;
1290
1291 esw = flow->priv->mdev->priv.eswitch;
1292 rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1293 uplink_priv = &rpriv->uplink_priv;
1294
1295 mutex_lock(&uplink_priv->unready_flows_lock);
1296 unready_flow_add(flow, &uplink_priv->unready_flows);
1297 mutex_unlock(&uplink_priv->unready_flows_lock);
1298}
1299
1300static void remove_unready_flow(struct mlx5e_tc_flow *flow)
1301{
1302 struct mlx5_rep_uplink_priv *uplink_priv;
1303 struct mlx5e_rep_priv *rpriv;
1304 struct mlx5_eswitch *esw;
1305
1306 esw = flow->priv->mdev->priv.eswitch;
1307 rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1308 uplink_priv = &rpriv->uplink_priv;
1309
1310 mutex_lock(&uplink_priv->unready_flows_lock);
1311 unready_flow_del(flow);
1312 mutex_unlock(&uplink_priv->unready_flows_lock);
1313}
1314
1315static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
1316
1317bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
1318{
1319 struct mlx5_core_dev *out_mdev, *route_mdev;
1320 struct mlx5e_priv *out_priv, *route_priv;
1321
1322 out_priv = netdev_priv(out_dev);
1323 out_mdev = out_priv->mdev;
1324 route_priv = netdev_priv(route_dev);
1325 route_mdev = route_priv->mdev;
1326
1327 if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
1328 route_mdev->coredev_type != MLX5_COREDEV_VF)
1329 return false;
1330
1331 return same_hw_devs(out_priv, route_priv);
1332}
1333
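/* Resolve the eswitch vport that owns the tunnel route device, by its
 * vhca_id.  Under LAG the route device may belong to the peer eswitch, so
 * a miss on the local eswitch falls back to the devcom peer.
 */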
1334int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
1335{
1336 struct mlx5e_priv *out_priv, *route_priv;
1337 struct mlx5_devcom *devcom = NULL;
1338 struct mlx5_core_dev *route_mdev;
1339 struct mlx5_eswitch *esw;
1340 u16 vhca_id;
1341 int err;
1342
1343 out_priv = netdev_priv(out_dev);
1344 esw = out_priv->mdev->priv.eswitch;
1345 route_priv = netdev_priv(route_dev);
1346 route_mdev = route_priv->mdev;
1347
1348 vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
1349 if (mlx5_lag_is_active(out_priv->mdev)) {
		/* Under LAG the devices may come from different eswitch
		 * instances; if the vhca_id is not known to this eswitch,
		 * retry on the peer eswitch below.
		 */
1354 err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
1355 if (err != -ENOENT)
1356 return err;
1357
1358 devcom = out_priv->mdev->priv.devcom;
1359 esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1360 if (!esw)
1361 return -ENODEV;
1362 }
1363
1364 err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
1365 if (devcom)
1366 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1367 return err;
1368}
1369
1370int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
1371 struct mlx5e_tc_flow_parse_attr *parse_attr,
1372 struct mlx5e_tc_flow *flow)
1373{
1374 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
1375 struct mlx5_modify_hdr *mod_hdr;
1376
1377 mod_hdr = mlx5_modify_header_alloc(priv->mdev,
1378 get_flow_name_space(flow),
1379 mod_hdr_acts->num_actions,
1380 mod_hdr_acts->actions);
1381 if (IS_ERR(mod_hdr))
1382 return PTR_ERR(mod_hdr);
1383
1384 WARN_ON(flow->attr->modify_hdr);
1385 flow->attr->modify_hdr = mod_hdr;
1386
1387 return 0;
1388}
1389
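/* Build and install an eswitch (FDB) flow: validate chain/prio ranges,
 * set up decap routes, internal ports and encap destinations, attach
 * header-rewrite and counter resources, and finally offload either to the
 * fast path or, when an encap neighbour is not yet valid, to the slow
 * path.
 */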
1390static int
1391mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
1392 struct mlx5e_tc_flow *flow,
1393 struct netlink_ext_ack *extack)
1394{
1395 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1396 struct mlx5e_tc_flow_parse_attr *parse_attr;
1397 struct mlx5_flow_attr *attr = flow->attr;
1398 bool vf_tun = false, encap_valid = true;
1399 struct net_device *encap_dev = NULL;
1400 struct mlx5_esw_flow_attr *esw_attr;
1401 struct mlx5_fc *counter = NULL;
1402 struct mlx5e_rep_priv *rpriv;
1403 struct mlx5e_priv *out_priv;
1404 u32 max_prio, max_chain;
1405 int err = 0;
1406 int out_index;
1407
1408 parse_attr = attr->parse_attr;
1409 esw_attr = attr->esw_attr;
	/* The chain range is only validated for tc flows; ft flows were
	 * already placed on a reserved chain outside the tc range, which is
	 * why they are skipped in the check below.
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
1417 if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
1418 NL_SET_ERR_MSG_MOD(extack,
1419 "Requested chain is out of supported range");
1420 err = -EOPNOTSUPP;
1421 goto err_out;
1422 }
1423
1424 max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
1425 if (attr->prio > max_prio) {
1426 NL_SET_ERR_MSG_MOD(extack,
1427 "Requested priority is out of supported range");
1428 err = -EOPNOTSUPP;
1429 goto err_out;
1430 }
1431
1432 if (flow_flag_test(flow, TUN_RX)) {
1433 err = mlx5e_attach_decap_route(priv, flow);
1434 if (err)
1435 goto err_out;
1436
1437 if (!attr->chain && esw_attr->int_port &&
1438 attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* The decap route device is an internal port: set the
			 * source vport metadata in reg_c0 to the ingress
			 * representor's vport so that later chain processing
			 * and software resume see a valid source port.
			 */
1446 u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
1447 esw_attr->in_rep->vport);
1448
1449 err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
1450 MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
1451 metadata);
1452 if (err)
1453 goto err_out;
1454
1455 attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1456 }
1457 }
1458
1459 if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
1460 err = mlx5e_attach_decap(priv, flow, extack);
1461 if (err)
1462 goto err_out;
1463 }
1464
1465 if (netif_is_ovs_master(parse_attr->filter_dev)) {
1466 struct mlx5e_tc_int_port *int_port;
1467
1468 if (attr->chain) {
1469 NL_SET_ERR_MSG_MOD(extack,
1470 "Internal port rule is only supported on chain 0");
1471 err = -EOPNOTSUPP;
1472 goto err_out;
1473 }
1474
1475 if (attr->dest_chain) {
1476 NL_SET_ERR_MSG_MOD(extack,
1477 "Internal port rule offload doesn't support goto action");
1478 err = -EOPNOTSUPP;
1479 goto err_out;
1480 }
1481
1482 int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
1483 parse_attr->filter_dev->ifindex,
1484 flow_flag_test(flow, EGRESS) ?
1485 MLX5E_TC_INT_PORT_EGRESS :
1486 MLX5E_TC_INT_PORT_INGRESS);
1487 if (IS_ERR(int_port)) {
1488 err = PTR_ERR(int_port);
1489 goto err_out;
1490 }
1491
1492 esw_attr->int_port = int_port;
1493 }
1494
1495 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
1496 struct net_device *out_dev;
1497 int mirred_ifindex;
1498
1499 if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
1500 continue;
1501
1502 mirred_ifindex = parse_attr->mirred_ifindex[out_index];
1503 out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
1504 if (!out_dev) {
1505 NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
1506 err = -ENODEV;
1507 goto err_out;
1508 }
1509 err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
1510 extack, &encap_dev, &encap_valid);
1511 dev_put(out_dev);
1512 if (err)
1513 goto err_out;
1514
1515 if (esw_attr->dests[out_index].flags &
1516 MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
1517 !esw_attr->dest_int_port)
1518 vf_tun = true;
1519 out_priv = netdev_priv(encap_dev);
1520 rpriv = out_priv->ppriv;
1521 esw_attr->dests[out_index].rep = rpriv->rep;
1522 esw_attr->dests[out_index].mdev = out_priv->mdev;
1523 }
1524
1525 if (vf_tun && esw_attr->out_count > 1) {
1526 NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
1527 err = -EOPNOTSUPP;
1528 goto err_out;
1529 }
1530
1531 err = mlx5_eswitch_add_vlan_action(esw, attr);
1532 if (err)
1533 goto err_out;
1534
1535 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1536 !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
1537 if (vf_tun) {
1538 err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
1539 if (err)
1540 goto err_out;
1541 } else {
1542 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
1543 if (err)
1544 goto err_out;
1545 }
1546 }
1547
1548 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1549 counter = mlx5_fc_create(esw_attr->counter_dev, true);
1550 if (IS_ERR(counter)) {
1551 err = PTR_ERR(counter);
1552 goto err_out;
1553 }
1554
1555 attr->counter = counter;
1556 }
1557
	/* We get here either when everything succeeded, or when there is an
	 * encap action whose neighbour is not yet valid; in the latter case
	 * the flow is parked on the slow path until the neighbour resolves.
	 */
1562 if (!encap_valid)
1563 flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
1564 else
1565 flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
1566
1567 if (IS_ERR(flow->rule[0])) {
1568 err = PTR_ERR(flow->rule[0]);
1569 goto err_out;
1570 }
1571 flow_flag_set(flow, OFFLOADED);
1572
1573 return 0;
1574
1575err_out:
1576 flow_flag_set(flow, FAILED);
1577 return err;
1578}
1579
1580static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
1581{
1582 struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
1583 void *headers_v = MLX5_ADDR_OF(fte_match_param,
1584 spec->match_value,
1585 misc_parameters_3);
1586 u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
1587 headers_v,
1588 geneve_tlv_option_0_data);
1589
1590 return !!geneve_tlv_opt_0_data;
1591}
1592
1593static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1594 struct mlx5e_tc_flow *flow)
1595{
1596 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1597 struct mlx5_flow_attr *attr = flow->attr;
1598 struct mlx5_esw_flow_attr *esw_attr;
1599 bool vf_tun = false;
1600 int out_index;
1601
1602 esw_attr = attr->esw_attr;
1603 mlx5e_put_flow_tunnel_id(flow);
1604
1605 if (flow_flag_test(flow, NOT_READY))
1606 remove_unready_flow(flow);
1607
1608 if (mlx5e_is_offloaded_flow(flow)) {
1609 if (flow_flag_test(flow, SLOW))
1610 mlx5e_tc_unoffload_from_slow_path(esw, flow);
1611 else
1612 mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
1613 }
1614 complete_all(&flow->del_hw_done);
1615
1616 if (mlx5_flow_has_geneve_opt(flow))
1617 mlx5_geneve_tlv_option_del(priv->mdev->geneve);
1618
1619 mlx5_eswitch_del_vlan_action(esw, attr);
1620
1621 if (flow->decap_route)
1622 mlx5e_detach_decap_route(priv, flow);
1623
1624 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
1625 if (esw_attr->dests[out_index].flags &
1626 MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
1627 !esw_attr->dest_int_port)
1628 vf_tun = true;
1629 if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
1630 mlx5e_detach_encap(priv, flow, out_index);
1631 kfree(attr->parse_attr->tun_info[out_index]);
1632 }
1633 }
1634
1635 mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
1636
1637 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
1638 dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts);
1639 if (vf_tun && attr->modify_hdr)
1640 mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
1641 else
1642 mlx5e_detach_mod_hdr(priv, flow);
1643 }
1644 kfree(attr->sample_attr);
1645 kvfree(attr->parse_attr);
1646 kvfree(attr->esw_attr->rx_tun_attr);
1647
1648 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1649 mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
1650
1651 if (esw_attr->int_port)
1652 mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);
1653
1654 if (esw_attr->dest_int_port)
1655 mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);
1656
1657 if (flow_flag_test(flow, L3_TO_L2_DECAP))
1658 mlx5e_detach_decap(priv, flow);
1659
1660 kfree(flow->attr);
1661}
1662
1663struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1664{
1665 return flow->attr->counter;
1666}
1667
1668
1669void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
1670{
1671 struct mlx5e_tc_flow *flow, *tmp;
1672
1673 list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
1674 mlx5e_flow_put(priv, flow);
1675}
1676
1677static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
1678{
1679 struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
1680
1681 if (!flow_flag_test(flow, ESWITCH) ||
1682 !flow_flag_test(flow, DUP))
1683 return;
1684
1685 mutex_lock(&esw->offloads.peer_mutex);
1686 list_del(&flow->peer);
1687 mutex_unlock(&esw->offloads.peer_mutex);
1688
1689 flow_flag_clear(flow, DUP);
1690
1691 if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
1692 mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
1693 kfree(flow->peer_flow);
1694 }
1695
1696 flow->peer_flow = NULL;
1697}
1698
1699static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
1700{
1701 struct mlx5_core_dev *dev = flow->priv->mdev;
1702 struct mlx5_devcom *devcom = dev->priv.devcom;
1703 struct mlx5_eswitch *peer_esw;
1704
1705 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1706 if (!peer_esw)
1707 return;
1708
1709 __mlx5e_tc_del_fdb_peer_flow(flow);
1710 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1711}
1712
1713static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1714 struct mlx5e_tc_flow *flow)
1715{
1716 if (mlx5e_is_eswitch_flow(flow)) {
1717 mlx5e_tc_del_fdb_peer_flow(flow);
1718 mlx5e_tc_del_fdb_flow(priv, flow);
1719 } else {
1720 mlx5e_tc_del_nic_flow(priv, flow);
1721 }
1722}
1723
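/* Tunnel matches must be moved into a mapping register (instead of being
 * matched directly) when the chain-0 rule forwards to another chain or to
 * the sample action, because by then the outer headers have already been
 * decapsulated.
 */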
1724static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
1725{
1726 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1727 struct flow_action *flow_action = &rule->action;
1728 const struct flow_action_entry *act;
1729 int i;
1730
1731 if (chain)
1732 return false;
1733
1734 flow_action_for_each(i, act, flow_action) {
1735 switch (act->id) {
1736 case FLOW_ACTION_GOTO:
1737 return true;
1738 case FLOW_ACTION_SAMPLE:
1739 return true;
1740 default:
1741 continue;
1742 }
1743 }
1744
1745 return false;
1746}
1747
1748static int
1749enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
1750 struct flow_dissector_key_enc_opts *opts,
1751 struct netlink_ext_ack *extack,
1752 bool *dont_care)
1753{
1754 struct geneve_opt *opt;
1755 int off = 0;
1756
1757 *dont_care = true;
1758
1759 while (opts->len > off) {
1760 opt = (struct geneve_opt *)&opts->data[off];
1761
1762 if (!(*dont_care) || opt->opt_class || opt->type ||
1763 memchr_inv(opt->opt_data, 0, opt->length * 4)) {
1764 *dont_care = false;
1765
1766 if (opt->opt_class != htons(U16_MAX) ||
1767 opt->type != U8_MAX) {
1768 NL_SET_ERR_MSG(extack,
1769 "Partial match of tunnel options in chain > 0 isn't supported");
1770 netdev_warn(priv->netdev,
1771 "Partial match of tunnel options in chain > 0 isn't supported");
1772 return -EOPNOTSUPP;
1773 }
1774 }
1775
1776 off += sizeof(struct geneve_opt) + opt->length * 4;
1777 }
1778
1779 return 0;
1780}
1781
1782#define COPY_DISSECTOR(rule, diss_key, dst)\
1783({ \
1784 struct flow_rule *__rule = (rule);\
1785 typeof(dst) __dst = dst;\
1786\
1787 memcpy(__dst,\
1788 skb_flow_dissector_target(__rule->match.dissector,\
1789 diss_key,\
1790 __rule->match.key),\
1791 sizeof(*__dst));\
1792})
1793
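/* Allocate mapping ids for the flow's tunnel match (and, if present, its
 * non-wildcard geneve options) and either match on them via TUNNEL_TO_REG
 * for chain > 0 rules, or write them to the register with a modify-header
 * action on chain 0.  The combined id, tun_id << ENC_OPTS_BITS |
 * enc_opts_id, is stored in flow->tunnel_id.
 */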
1794static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
1795 struct mlx5e_tc_flow *flow,
1796 struct flow_cls_offload *f,
1797 struct net_device *filter_dev)
1798{
1799 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1800 struct netlink_ext_ack *extack = f->common.extack;
1801 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
1802 struct flow_match_enc_opts enc_opts_match;
1803 struct tunnel_match_enc_opts tun_enc_opts;
1804 struct mlx5_rep_uplink_priv *uplink_priv;
1805 struct mlx5_flow_attr *attr = flow->attr;
1806 struct mlx5e_rep_priv *uplink_rpriv;
1807 struct tunnel_match_key tunnel_key;
1808 bool enc_opts_is_dont_care = true;
1809 u32 tun_id, enc_opts_id = 0;
1810 struct mlx5_eswitch *esw;
1811 u32 value, mask;
1812 int err;
1813
1814 esw = priv->mdev->priv.eswitch;
1815 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1816 uplink_priv = &uplink_rpriv->uplink_priv;
1817
1818 memset(&tunnel_key, 0, sizeof(tunnel_key));
1819 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1820 &tunnel_key.enc_control);
1821 if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
1822 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1823 &tunnel_key.enc_ipv4);
1824 else
1825 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
1826 &tunnel_key.enc_ipv6);
1827 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
1828 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
1829 &tunnel_key.enc_tp);
1830 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
1831 &tunnel_key.enc_key_id);
1832 tunnel_key.filter_ifindex = filter_dev->ifindex;
1833
1834 err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
1835 if (err)
1836 return err;
1837
1838 flow_rule_match_enc_opts(rule, &enc_opts_match);
1839 err = enc_opts_is_dont_care_or_full_match(priv,
1840 enc_opts_match.mask,
1841 extack,
1842 &enc_opts_is_dont_care);
1843 if (err)
1844 goto err_enc_opts;
1845
1846 if (!enc_opts_is_dont_care) {
1847 memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
1848 memcpy(&tun_enc_opts.key, enc_opts_match.key,
1849 sizeof(*enc_opts_match.key));
1850 memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
1851 sizeof(*enc_opts_match.mask));
1852
1853 err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
1854 &tun_enc_opts, &enc_opts_id);
1855 if (err)
1856 goto err_enc_opts;
1857 }
1858
1859 value = tun_id << ENC_OPTS_BITS | enc_opts_id;
1860 mask = enc_opts_id ? TUNNEL_ID_MASK :
1861 (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
1862
1863 if (attr->chain) {
1864 mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
1865 TUNNEL_TO_REG, value, mask);
1866 } else {
1867 mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
1868 err = mlx5e_tc_match_to_reg_set(priv->mdev,
1869 mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
1870 TUNNEL_TO_REG, value);
1871 if (err)
1872 goto err_set;
1873
1874 attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1875 }
1876
1877 flow->tunnel_id = value;
1878 return 0;
1879
1880err_set:
1881 if (enc_opts_id)
1882 mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
1883 enc_opts_id);
1884err_enc_opts:
1885 mapping_remove(uplink_priv->tunnel_mapping, tun_id);
1886 return err;
1887}
1888
1889static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
1890{
1891 u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
1892 u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
1893 struct mlx5_rep_uplink_priv *uplink_priv;
1894 struct mlx5e_rep_priv *uplink_rpriv;
1895 struct mlx5_eswitch *esw;
1896
1897 esw = flow->priv->mdev->priv.eswitch;
1898 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1899 uplink_priv = &uplink_rpriv->uplink_priv;
1900
1901 if (tun_id)
1902 mapping_remove(uplink_priv->tunnel_mapping, tun_id);
1903 if (enc_opts_id)
1904 mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
1905 enc_opts_id);
1906}
1907
1908u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
1909{
1910 return flow->tunnel_id;
1911}
1912
1913void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
1914 struct flow_match_basic *match, bool outer,
1915 void *headers_c, void *headers_v)
1916{
1917 bool ip_version_cap;
1918
	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);
1927
1928 if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
1929 (match->key->n_proto == htons(ETH_P_IP) ||
1930 match->key->n_proto == htons(ETH_P_IPV6))) {
1931 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
1932 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
1933 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
1934 } else {
1935 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
1936 ntohs(match->mask->n_proto));
1937 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
1938 ntohs(match->key->n_proto));
1939 }
1940}
1941
1942u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
1943{
1944 void *headers_v;
1945 u16 ethertype;
1946 u8 ip_version;
1947
1948 if (outer)
1949 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
1950 else
1951 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
1952
1953 ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
1954
1955 if (!ip_version) {
1956 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
1957 if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
1958 ip_version = 4;
1959 else if (ethertype == ETH_P_IPV6)
1960 ip_version = 6;
1961 }
1962 return ip_version;
1963}
1964
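/* Parse the tunnel part of a classifier: on chain 0 the outer headers are
 * matched directly and a decap action may be added; on higher chains (or
 * when a later chain/sample needs the tunnel info) the tunnel match is
 * instead encoded through mlx5e_get_flow_tunnel_id() into a metadata
 * register.
 */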
1965static int parse_tunnel_attr(struct mlx5e_priv *priv,
1966 struct mlx5e_tc_flow *flow,
1967 struct mlx5_flow_spec *spec,
1968 struct flow_cls_offload *f,
1969 struct net_device *filter_dev,
1970 u8 *match_level,
1971 bool *match_inner)
1972{
1973 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
1974 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1975 struct netlink_ext_ack *extack = f->common.extack;
1976 bool needs_mapping, sets_mapping;
1977 int err;
1978
1979 if (!mlx5e_is_eswitch_flow(flow))
1980 return -EOPNOTSUPP;
1981
1982 needs_mapping = !!flow->attr->chain;
1983 sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
1984 *match_inner = !needs_mapping;
1985
1986 if ((needs_mapping || sets_mapping) &&
1987 !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
1988 NL_SET_ERR_MSG(extack,
1989 "Chains on tunnel devices isn't supported without register loopback support");
1990 netdev_warn(priv->netdev,
1991 "Chains on tunnel devices isn't supported without register loopback support");
1992 return -EOPNOTSUPP;
1993 }
1994
1995 if (!flow->attr->chain) {
1996 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
1997 match_level);
1998 if (err) {
1999 NL_SET_ERR_MSG_MOD(extack,
2000 "Failed to parse tunnel attributes");
2001 netdev_warn(priv->netdev,
2002 "Failed to parse tunnel attributes");
2003 return err;
2004 }
2005
		/* bareudp (mpls over udp) tunnels are not decapsulated with
		 * the generic decap action; only set DECAP for other tunnel
		 * types.
		 */
2009 if (!netif_is_bareudp(filter_dev))
2010 flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2011 err = mlx5e_tc_set_attr_rx_tun(flow, spec);
2012 if (err)
2013 return err;
2014 } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
2015 struct mlx5_flow_spec *tmp_spec;
2016
2017 tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
2018 if (!tmp_spec) {
2019 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
2020 netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
2021 return -ENOMEM;
2022 }
2023 memcpy(tmp_spec, spec, sizeof(*tmp_spec));
2024
2025 err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
2026 if (err) {
2027 kvfree(tmp_spec);
2028 NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
2029 netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
2030 return err;
2031 }
2032 err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
2033 kvfree(tmp_spec);
2034 if (err)
2035 return err;
2036 }
2037
2038 if (!needs_mapping && !sets_mapping)
2039 return 0;
2040
2041 return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
2042}
2043
2044static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
2045{
2046 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2047 inner_headers);
2048}
2049
2050static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
2051{
2052 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2053 inner_headers);
2054}
2055
2056static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
2057{
2058 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2059 outer_headers);
2060}
2061
2062static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
2063{
2064 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2065 outer_headers);
2066}
2067
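/* A decapsulating flow matches on the inner packet headers; all other
 * flows match on the outer headers. These helpers pick the right set
 * based on the DECAP action flag.
 */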
2068static void *get_match_headers_value(u32 flags,
2069 struct mlx5_flow_spec *spec)
2070{
2071 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2072 get_match_inner_headers_value(spec) :
2073 get_match_outer_headers_value(spec);
2074}
2075
2076static void *get_match_headers_criteria(u32 flags,
2077 struct mlx5_flow_spec *spec)
2078{
2079 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2080 get_match_inner_headers_criteria(spec) :
2081 get_match_outer_headers_criteria(spec);
2082}
2083
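/* The meta key carries the rule's ingress ifindex; offload is allowed
 * only when that ingress device is the filter device itself.
 */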
2084static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
2085 struct flow_cls_offload *f)
2086{
2087 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2088 struct netlink_ext_ack *extack = f->common.extack;
2089 struct net_device *ingress_dev;
2090 struct flow_match_meta match;
2091
2092 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
2093 return 0;
2094
2095 flow_rule_match_meta(rule, &match);
2096 if (!match.mask->ingress_ifindex)
2097 return 0;
2098
2099 if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
2100 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
2101 return -EOPNOTSUPP;
2102 }
2103
2104 ingress_dev = __dev_get_by_index(dev_net(filter_dev),
2105 match.key->ingress_ifindex);
2106 if (!ingress_dev) {
2107 NL_SET_ERR_MSG_MOD(extack,
2108 "Can't find the ingress port to match on");
2109 return -ENOENT;
2110 }
2111
2112 if (ingress_dev != filter_dev) {
2113 NL_SET_ERR_MSG_MOD(extack,
2114 "Can't match on the ingress filter port");
2115 return -EOPNOTSUPP;
2116 }
2117
2118 return 0;
2119}
2120
2121static bool skip_key_basic(struct net_device *filter_dev,
2122 struct flow_cls_offload *f)
2123{
2124 /* Rules installed on a bareudp (MPLS over UDP) device at chain 0
2125 * carry the inner L3 protocol in the basic key. There is no inner
2126 * Ethernet header to match that ethertype against, so skip the basic
2127 * key here; the protocol is handled by the tunnel parsing instead.
2128 */
2129 if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
2130 return true;
2131
2132 return false;
2133}
2134
2135static int __parse_cls_flower(struct mlx5e_priv *priv,
2136 struct mlx5e_tc_flow *flow,
2137 struct mlx5_flow_spec *spec,
2138 struct flow_cls_offload *f,
2139 struct net_device *filter_dev,
2140 u8 *inner_match_level, u8 *outer_match_level)
2141{
2142 struct netlink_ext_ack *extack = f->common.extack;
2143 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2144 outer_headers);
2145 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2146 outer_headers);
2147 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2148 misc_parameters);
2149 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2150 misc_parameters);
2151 void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2152 misc_parameters_3);
2153 void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2154 misc_parameters_3);
2155 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2156 struct flow_dissector *dissector = rule->match.dissector;
2157 enum fs_flow_table_type fs_type;
2158 u16 addr_type = 0;
2159 u8 ip_proto = 0;
2160 u8 *match_level;
2161 int err;
2162
2163 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2164 match_level = outer_match_level;
2165
2166 if (dissector->used_keys &
2167 ~(BIT(FLOW_DISSECTOR_KEY_META) |
2168 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2169 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2170 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2171 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2172 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2173 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2174 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2175 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2176 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2177 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2178 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2179 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2180 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2181 BIT(FLOW_DISSECTOR_KEY_TCP) |
2182 BIT(FLOW_DISSECTOR_KEY_IP) |
2183 BIT(FLOW_DISSECTOR_KEY_CT) |
2184 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2185 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2186 BIT(FLOW_DISSECTOR_KEY_ICMP) |
2187 BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2188 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2189 netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
2190 dissector->used_keys);
2191 return -EOPNOTSUPP;
2192 }
2193
2194 if (mlx5e_get_tc_tun(filter_dev)) {
2195 bool match_inner = false;
2196
2197 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2198 outer_match_level, &match_inner);
2199 if (err)
2200 return err;
2201
2202 if (match_inner) {
2203 /* header pointers should point to the inner headers
2204 * if the packet was decapsulated already.
2205 * outer headers are set by parse_tunnel_attr.
2206 */
2207 match_level = inner_match_level;
2208 headers_c = get_match_inner_headers_criteria(spec);
2209 headers_v = get_match_inner_headers_value(spec);
2210 }
2211 }
2212
2213 err = mlx5e_flower_parse_meta(filter_dev, f);
2214 if (err)
2215 return err;
2216
2217 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2218 !skip_key_basic(filter_dev, f)) {
2219 struct flow_match_basic match;
2220
2221 flow_rule_match_basic(rule, &match);
2222 mlx5e_tc_set_ethertype(priv->mdev, &match,
2223 match_level == outer_match_level,
2224 headers_c, headers_v);
2225
2226 if (match.mask->n_proto)
2227 *match_level = MLX5_MATCH_L2;
2228 }
2229 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2230 is_vlan_dev(filter_dev)) {
2231 struct flow_dissector_key_vlan filter_dev_mask;
2232 struct flow_dissector_key_vlan filter_dev_key;
2233 struct flow_match_vlan match;
2234
2235 if (is_vlan_dev(filter_dev)) {
2236 match.key = &filter_dev_key;
2237 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2238 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2239 match.key->vlan_priority = 0;
2240 match.mask = &filter_dev_mask;
2241 memset(match.mask, 0xff, sizeof(*match.mask));
2242 match.mask->vlan_priority = 0;
2243 } else {
2244 flow_rule_match_vlan(rule, &match);
2245 }
2246 if (match.mask->vlan_id ||
2247 match.mask->vlan_priority ||
2248 match.mask->vlan_tpid) {
2249 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2250 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2251 svlan_tag, 1);
2252 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2253 svlan_tag, 1);
2254 } else {
2255 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2256 cvlan_tag, 1);
2257 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2258 cvlan_tag, 1);
2259 }
2260
2261 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2262 match.mask->vlan_id);
2263 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2264 match.key->vlan_id);
2265
2266 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2267 match.mask->vlan_priority);
2268 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2269 match.key->vlan_priority);
2270
2271 *match_level = MLX5_MATCH_L2;
2272 }
2273 } else if (*match_level != MLX5_MATCH_NONE) {
2274 /* cvlan_tag enabled in match criteria and
2275 * disabled in match value means both S & C tags
2276 * don't exist (untagged of both)
2277 */
2278 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2279 *match_level = MLX5_MATCH_L2;
2280 }
2281
2282 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2283 struct flow_match_vlan match;
2284
2285 flow_rule_match_cvlan(rule, &match);
2286 if (match.mask->vlan_id ||
2287 match.mask->vlan_priority ||
2288 match.mask->vlan_tpid) {
2289 if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
2290 fs_type)) {
2291 NL_SET_ERR_MSG_MOD(extack,
2292 "Matching on CVLAN is not supported");
2293 return -EOPNOTSUPP;
2294 }
2295
2296 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2297 MLX5_SET(fte_match_set_misc, misc_c,
2298 outer_second_svlan_tag, 1);
2299 MLX5_SET(fte_match_set_misc, misc_v,
2300 outer_second_svlan_tag, 1);
2301 } else {
2302 MLX5_SET(fte_match_set_misc, misc_c,
2303 outer_second_cvlan_tag, 1);
2304 MLX5_SET(fte_match_set_misc, misc_v,
2305 outer_second_cvlan_tag, 1);
2306 }
2307
2308 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2309 match.mask->vlan_id);
2310 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2311 match.key->vlan_id);
2312 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2313 match.mask->vlan_priority);
2314 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2315 match.key->vlan_priority);
2316
2317 *match_level = MLX5_MATCH_L2;
2318 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2319 }
2320 }
2321
2322 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2323 struct flow_match_eth_addrs match;
2324
2325 flow_rule_match_eth_addrs(rule, &match);
2326 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2327 dmac_47_16),
2328 match.mask->dst);
2329 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2330 dmac_47_16),
2331 match.key->dst);
2332
2333 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2334 smac_47_16),
2335 match.mask->src);
2336 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2337 smac_47_16),
2338 match.key->src);
2339
2340 if (!is_zero_ether_addr(match.mask->src) ||
2341 !is_zero_ether_addr(match.mask->dst))
2342 *match_level = MLX5_MATCH_L2;
2343 }
2344
2345 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2346 struct flow_match_control match;
2347
2348 flow_rule_match_control(rule, &match);
2349 addr_type = match.key->addr_type;
2350
2351 /* the HW doesn't support frag first/later */
2352 if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
2353 return -EOPNOTSUPP;
2354
2355 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2356 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2357 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2358 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2359
2360 /* the HW doesn't need L3 inline to match on frag=no */
2361 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2362 *match_level = MLX5_MATCH_L2;
2363 /* ***  L2 attributes parsing up to here *** */
2364 else
2365 *match_level = MLX5_MATCH_L3;
2366 }
2367 }
2368
2369 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2370 struct flow_match_basic match;
2371
2372 flow_rule_match_basic(rule, &match);
2373 ip_proto = match.key->ip_proto;
2374
2375 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2376 match.mask->ip_proto);
2377 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2378 match.key->ip_proto);
2379
2380 if (match.mask->ip_proto)
2381 *match_level = MLX5_MATCH_L3;
2382 }
2383
2384 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2385 struct flow_match_ipv4_addrs match;
2386
2387 flow_rule_match_ipv4_addrs(rule, &match);
2388 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2389 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2390 &match.mask->src, sizeof(match.mask->src));
2391 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2392 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2393 &match.key->src, sizeof(match.key->src));
2394 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2395 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2396 &match.mask->dst, sizeof(match.mask->dst));
2397 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2398 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2399 &match.key->dst, sizeof(match.key->dst));
2400
2401 if (match.mask->src || match.mask->dst)
2402 *match_level = MLX5_MATCH_L3;
2403 }
2404
2405 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2406 struct flow_match_ipv6_addrs match;
2407
2408 flow_rule_match_ipv6_addrs(rule, &match);
2409 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2410 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2411 &match.mask->src, sizeof(match.mask->src));
2412 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2413 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2414 &match.key->src, sizeof(match.key->src));
2415
2416 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2417 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2418 &match.mask->dst, sizeof(match.mask->dst));
2419 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2420 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2421 &match.key->dst, sizeof(match.key->dst));
2422
2423 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2424 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2425 *match_level = MLX5_MATCH_L3;
2426 }
2427
2428 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2429 struct flow_match_ip match;
2430
2431 flow_rule_match_ip(rule, &match);
2432 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2433 match.mask->tos & 0x3);
2434 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2435 match.key->tos & 0x3);
2436
2437 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2438 match.mask->tos >> 2);
2439 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2440 match.key->tos >> 2);
2441
2442 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2443 match.mask->ttl);
2444 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2445 match.key->ttl);
2446
2447 if (match.mask->ttl &&
2448 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2449 ft_field_support.outer_ipv4_ttl)) {
2450 NL_SET_ERR_MSG_MOD(extack,
2451 "Matching on TTL is not supported");
2452 return -EOPNOTSUPP;
2453 }
2454
2455 if (match.mask->tos || match.mask->ttl)
2456 *match_level = MLX5_MATCH_L3;
2457 }
2458
2459 /* ***  L3 attributes parsing up to here *** */
2460
2461 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2462 struct flow_match_ports match;
2463
2464 flow_rule_match_ports(rule, &match);
2465 switch (ip_proto) {
2466 case IPPROTO_TCP:
2467 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2468 tcp_sport, ntohs(match.mask->src));
2469 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2470 tcp_sport, ntohs(match.key->src));
2471
2472 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2473 tcp_dport, ntohs(match.mask->dst));
2474 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2475 tcp_dport, ntohs(match.key->dst));
2476 break;
2477
2478 case IPPROTO_UDP:
2479 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2480 udp_sport, ntohs(match.mask->src));
2481 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2482 udp_sport, ntohs(match.key->src));
2483
2484 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2485 udp_dport, ntohs(match.mask->dst));
2486 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2487 udp_dport, ntohs(match.key->dst));
2488 break;
2489 default:
2490 NL_SET_ERR_MSG_MOD(extack,
2491 "Only UDP and TCP transports are supported for L4 matching");
2492 netdev_err(priv->netdev,
2493 "Only UDP and TCP transports are supported\n");
2494 return -EINVAL;
2495 }
2496
2497 if (match.mask->src || match.mask->dst)
2498 *match_level = MLX5_MATCH_L4;
2499 }
2500
2501 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2502 struct flow_match_tcp match;
2503
2504 flow_rule_match_tcp(rule, &match);
2505 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2506 ntohs(match.mask->flags));
2507 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2508 ntohs(match.key->flags));
2509
2510 if (match.mask->flags)
2511 *match_level = MLX5_MATCH_L4;
2512 }
2513
2514 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
2515 struct flow_match_icmp match;
2516
2517 flow_rule_match_icmp(rule, &match);
2518 switch (ip_proto) {
2519 case IPPROTO_ICMP:
2520 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2521 MLX5_FLEX_PROTO_ICMP))
2522 return -EOPNOTSUPP;
2523 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
2524 match.mask->type);
2525 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
2526 match.key->type);
2527 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
2528 match.mask->code);
2529 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
2530 match.key->code);
2531 break;
2532 case IPPROTO_ICMPV6:
2533 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2534 MLX5_FLEX_PROTO_ICMPV6))
2535 return -EOPNOTSUPP;
2536 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
2537 match.mask->type);
2538 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
2539 match.key->type);
2540 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
2541 match.mask->code);
2542 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
2543 match.key->code);
2544 break;
2545 default:
2546 NL_SET_ERR_MSG_MOD(extack,
2547 "Code and type matching only with ICMP and ICMPv6");
2548 netdev_err(priv->netdev,
2549 "Code and type matching only with ICMP and ICMPv6\n");
2550 return -EINVAL;
2551 }
2552 if (match.mask->code || match.mask->type) {
2553 *match_level = MLX5_MATCH_L4;
2554 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
2555 }
2556 }
2557
2558 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
2559 !netif_is_bareudp(filter_dev)) {
2560 NL_SET_ERR_MSG_MOD(extack,
2561 "Matching on MPLS is supported only for MPLS over UDP");
2562 netdev_err(priv->netdev,
2563 "Matching on MPLS is supported only for MPLS over UDP\n");
2564 return -EOPNOTSUPP;
2565 }
2566
2567 return 0;
2568}
2569
2570static int parse_cls_flower(struct mlx5e_priv *priv,
2571 struct mlx5e_tc_flow *flow,
2572 struct mlx5_flow_spec *spec,
2573 struct flow_cls_offload *f,
2574 struct net_device *filter_dev)
2575{
2576 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2577 struct netlink_ext_ack *extack = f->common.extack;
2578 struct mlx5_core_dev *dev = priv->mdev;
2579 struct mlx5_eswitch *esw = dev->priv.eswitch;
2580 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2581 struct mlx5_eswitch_rep *rep;
2582 bool is_eswitch_flow;
2583 int err;
2584
2585 inner_match_level = MLX5_MATCH_NONE;
2586 outer_match_level = MLX5_MATCH_NONE;
2587
2588 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2589 &inner_match_level, &outer_match_level);
2590 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2591 outer_match_level : inner_match_level;
2592
2593 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2594 if (!err && is_eswitch_flow) {
2595 rep = rpriv->rep;
2596 if (rep->vport != MLX5_VPORT_UPLINK &&
2597 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2598 esw->offloads.inline_mode < non_tunnel_match_level)) {
2599 NL_SET_ERR_MSG_MOD(extack,
2600 "Flow is not offloaded due to min inline setting");
2601 netdev_warn(priv->netdev,
2602 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2603 non_tunnel_match_level, esw->offloads.inline_mode);
2604 return -EOPNOTSUPP;
2605 }
2606 }
2607
2608 flow->attr->inner_match_level = inner_match_level;
2609 flow->attr->outer_match_level = outer_match_level;
2610
2611
2612 return err;
2613}
2614
2615struct pedit_headers {
2616 struct ethhdr eth;
2617 struct vlan_hdr vlan;
2618 struct iphdr ip4;
2619 struct ipv6hdr ip6;
2620 struct tcphdr tcp;
2621 struct udphdr udp;
2622};
2623
2624struct pedit_headers_action {
2625 struct pedit_headers vals;
2626 struct pedit_headers masks;
2627 u32 pedits;
2628};
2629
2630static int pedit_header_offsets[] = {
2631 [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
2632 [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
2633 [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
2634 [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
2635 [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2636};
2637
2638#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2639
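/* Accumulate a single pedit key into the per-header-type mask/value
 * buffers; overlapping rewrites of the same bits are rejected.
 */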
2640static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2641 struct pedit_headers_action *hdrs)
2642{
2643 u32 *curr_pmask, *curr_pval;
2644
2645 curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2646 curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2647
2648 if (*curr_pmask & mask)
2649 goto out_err;
2650
2651 *curr_pmask |= mask;
2652 *curr_pval |= (val & mask);
2653
2654 return 0;
2655
2656out_err:
2657 return -EOPNOTSUPP;
2658}
2659
2660struct mlx5_fields {
2661 u8 field;
2662 u8 field_bsize;
2663 u32 field_mask;
2664 u32 offset;
2665 u32 match_offset;
2666};
2667
2668#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2669 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2670 offsetof(struct pedit_headers, field) + (off), \
2671 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
2672
2673/* masked values are the same and there are no rewrites that do not have a
2674 * match.
2675 */
2676#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2677 type matchmaskx = *(type *)(matchmaskp); \
2678 type matchvalx = *(type *)(matchvalp); \
2679 type maskx = *(type *)(maskp); \
2680 type valx = *(type *)(valp); \
2681 \
2682 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2683 matchmaskx)); \
2684})
2685
2686static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2687 void *matchmaskp, u8 bsize)
2688{
2689 bool same = false;
2690
2691 switch (bsize) {
2692 case 8:
2693 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2694 break;
2695 case 16:
2696 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2697 break;
2698 case 32:
2699 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
2700 break;
2701 }
2702
2703 return same;
2704}
2705
2706static struct mlx5_fields fields[] = {
2707 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2708 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2709 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2710 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2711 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2712 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2713
2714 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2715 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2716 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2717 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2718
2719 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2720 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2721 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2722 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2723 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2724 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2725 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2726 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2727 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2728 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2729 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2730 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2731 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2732 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2733 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2734 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2735 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2736 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
2737
2738 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2739 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
2740 /* in linux iphdr tcp_flags is 8 bits long */
2741 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
2742
2743 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2744 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
2745};
2746
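/* pedit masks are stored in network (big endian) byte order; convert them
 * to little endian so the find_*_bit() helpers used below scan the bits of
 * a field in a consistent order.
 */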
2747static unsigned long mask_to_le(unsigned long mask, int size)
2748{
2749 __be32 mask_be32;
2750 __be16 mask_be16;
2751
2752 if (size == 32) {
2753 mask_be32 = (__force __be32)(mask);
2754 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2755 } else if (size == 16) {
2756 mask_be32 = (__force __be32)(mask);
2757 mask_be16 = *(__be16 *)&mask_be32;
2758 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2759 }
2760
2761 return mask;
2762}

2763static int offload_pedit_fields(struct mlx5e_priv *priv,
2764 int namespace,
2765 struct pedit_headers_action *hdrs,
2766 struct mlx5e_tc_flow_parse_attr *parse_attr,
2767 u32 *action_flags,
2768 struct netlink_ext_ack *extack)
2769{
2770 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2771 int i, action_size, first, last, next_z;
2772 void *headers_c, *headers_v, *action, *vals_p;
2773 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2774 struct mlx5e_tc_mod_hdr_acts *mod_acts;
2775 struct mlx5_fields *f;
2776 unsigned long mask, field_mask;
2777 int err;
2778 u8 cmd;
2779
2780 mod_acts = &parse_attr->mod_hdr_acts;
2781 headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
2782 headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2783
2784 set_masks = &hdrs[0].masks;
2785 add_masks = &hdrs[1].masks;
2786 set_vals = &hdrs[0].vals;
2787 add_vals = &hdrs[1].vals;
2788
2789 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2790
2791 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2792 bool skip;
2793
2794 f = &fields[i];
2795 /* avoid seeing bits set from previous iterations */
2796 s_mask = 0;
2797 a_mask = 0;
2798
2799 s_masks_p = (void *)set_masks + f->offset;
2800 a_masks_p = (void *)add_masks + f->offset;
2801
2802 s_mask = *s_masks_p & f->field_mask;
2803 a_mask = *a_masks_p & f->field_mask;
2804
2805 if (!s_mask && !a_mask) /* nothing to offload here */
2806 continue;
2807
2808 if (s_mask && a_mask) {
2809 NL_SET_ERR_MSG_MOD(extack,
2810 "can't set and add to the same HW field");
2811 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2812 return -EOPNOTSUPP;
2813 }
2814
2815 skip = false;
2816 if (s_mask) {
2817 void *match_mask = headers_c + f->match_offset;
2818 void *match_val = headers_v + f->match_offset;
2819
2820 cmd = MLX5_ACTION_TYPE_SET;
2821 mask = s_mask;
2822 vals_p = (void *)set_vals + f->offset;
2823 /* don't rewrite if we have a match on the same value */
2824 if (cmp_val_mask(vals_p, s_masks_p, match_val,
2825 match_mask, f->field_bsize))
2826 skip = true;
2827 /* clear to denote we consumed this field */
2828 *s_masks_p &= ~f->field_mask;
2829 } else {
2830 cmd = MLX5_ACTION_TYPE_ADD;
2831 mask = a_mask;
2832 vals_p = (void *)add_vals + f->offset;
2833 /* add 0 is no change */
2834 if ((*(u32 *)vals_p & f->field_mask) == 0)
2835 skip = true;
2836 /* clear to denote we consumed this field */
2837 *a_masks_p &= ~f->field_mask;
2838 }
2839 if (skip)
2840 continue;
2841
2842 mask = mask_to_le(mask, f->field_bsize);
2843
2844 first = find_first_bit(&mask, f->field_bsize);
2845 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2846 last = find_last_bit(&mask, f->field_bsize);
2847 if (first < next_z && next_z < last) {
2848 NL_SET_ERR_MSG_MOD(extack,
2849 "rewrite of a few sub-fields isn't supported");
2850 printk(KERN_WARNING "mlx5: rewrite of a few sub-fields (mask %lx) isn't offloaded\n",
2851 mask);
2852 return -EOPNOTSUPP;
2853 }
2854
2855 err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
2856 if (err) {
2857 NL_SET_ERR_MSG_MOD(extack,
2858 "too many pedit actions, can't offload");
2859 mlx5_core_warn(priv->mdev,
2860 "mlx5: parsed %d pedit actions, can't do more\n",
2861 mod_acts->num_actions);
2862 return err;
2863 }
2864
2865 action = mod_acts->actions +
2866 (mod_acts->num_actions * action_size);
2867 MLX5_SET(set_action_in, action, action_type, cmd);
2868 MLX5_SET(set_action_in, action, field, f->field);
2869
2870 if (cmd == MLX5_ACTION_TYPE_SET) {
2871 int start;
2872
2873 field_mask = mask_to_le(f->field_mask, f->field_bsize);
2874
2875 /* if field is bit sized it can start not from first bit */
2876 start = find_first_bit(&field_mask, f->field_bsize);
2877
2878 MLX5_SET(set_action_in, action, offset, first - start);
2879 /* length is num of bits to be written, zero means length of 32 */
2880 MLX5_SET(set_action_in, action, length, (last - first + 1));
2881 }
2882
2883 if (f->field_bsize == 32)
2884 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2885 else if (f->field_bsize == 16)
2886 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2887 else if (f->field_bsize == 8)
2888 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2889
2890 ++mod_acts->num_actions;
2891 }
2892
2893 return 0;
2894}
2895
2896static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
2897 int namespace)
2898{
2899 if (namespace == MLX5_FLOW_NAMESPACE_FDB)
2900 return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
2901 else
2902 return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
2903}
2904
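/* Make room for one more modify-header action, growing the array by
 * doubling until the device's max_modify_header_actions limit is reached.
 */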
2905int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2906 int namespace,
2907 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2908{
2909 int action_size, new_num_actions, max_hw_actions;
2910 size_t new_sz, old_sz;
2911 void *ret;
2912
2913 if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
2914 return 0;
2915
2916 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2917
2918 max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
2919 namespace);
2920 new_num_actions = min(max_hw_actions,
2921 mod_hdr_acts->actions ?
2922 mod_hdr_acts->max_actions * 2 : 1);
2923 if (mod_hdr_acts->max_actions == new_num_actions)
2924 return -ENOSPC;
2925
2926 new_sz = action_size * new_num_actions;
2927 old_sz = mod_hdr_acts->max_actions * action_size;
2928 ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
2929 if (!ret)
2930 return -ENOMEM;
2931
2932 memset(ret + old_sz, 0, new_sz - old_sz);
2933 mod_hdr_acts->actions = ret;
2934 mod_hdr_acts->max_actions = new_num_actions;
2935
2936 return 0;
2937}
2938
2939void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2940{
2941 kfree(mod_hdr_acts->actions);
2942 mod_hdr_acts->actions = NULL;
2943 mod_hdr_acts->num_actions = 0;
2944 mod_hdr_acts->max_actions = 0;
2945}
2946
2947static const struct pedit_headers zero_masks = {};
2948
2949static int
2950parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
2951 const struct flow_action_entry *act, int namespace,
2952 struct mlx5e_tc_flow_parse_attr *parse_attr,
2953 struct pedit_headers_action *hdrs,
2954 struct netlink_ext_ack *extack)
2955{
2956 u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2957 int err = -EOPNOTSUPP;
2958 u32 mask, val, offset;
2959 u8 htype;
2960
2961 htype = act->mangle.htype;
2963
2964 if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2965 NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2966 goto out_err;
2967 }
2968
2969 if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
2970 NL_SET_ERR_MSG_MOD(extack,
2971 "The pedit offload action is not supported");
2972 goto out_err;
2973 }
2974
2975 mask = act->mangle.mask;
2976 val = act->mangle.val;
2977 offset = act->mangle.offset;
2978
2979 err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2980 if (err)
2981 goto out_err;
2982
2983 hdrs[cmd].pedits++;
2984
2985 return 0;
2986out_err:
2987 return err;
2988}
2989
2990static int
2991parse_pedit_to_reformat(struct mlx5e_priv *priv,
2992 const struct flow_action_entry *act,
2993 struct mlx5e_tc_flow_parse_attr *parse_attr,
2994 struct netlink_ext_ack *extack)
2995{
2996 u32 mask, val, offset;
2997 u32 *p;
2998
2999 if (act->id != FLOW_ACTION_MANGLE)
3000 return -EOPNOTSUPP;
3001
3002 if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
3003 NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
3004 return -EOPNOTSUPP;
3005 }
3006
3007 mask = ~act->mangle.mask;
3008 val = act->mangle.val;
3009 offset = act->mangle.offset;
3010 p = (u32 *)&parse_attr->eth;
3011 *(p + (offset >> 2)) |= (val & mask);
3012
3013 return 0;
3014}
3015
3016static int parse_tc_pedit_action(struct mlx5e_priv *priv,
3017 const struct flow_action_entry *act, int namespace,
3018 struct mlx5e_tc_flow_parse_attr *parse_attr,
3019 struct pedit_headers_action *hdrs,
3020 struct mlx5e_tc_flow *flow,
3021 struct netlink_ext_ack *extack)
3022{
3023 if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
3024 return parse_pedit_to_reformat(priv, act, parse_attr, extack);
3025
3026 return parse_pedit_to_modify_hdr(priv, act, namespace,
3027 parse_attr, hdrs, extack);
3028}
3029
3030static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
3031 struct mlx5e_tc_flow_parse_attr *parse_attr,
3032 struct pedit_headers_action *hdrs,
3033 u32 *action_flags,
3034 struct netlink_ext_ack *extack)
3035{
3036 struct pedit_headers *cmd_masks;
3037 int err;
3038 u8 cmd;
3039
3040 err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
3041 action_flags, extack);
3042 if (err < 0)
3043 goto out_dealloc_parsed_actions;
3044
3045 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
3046 cmd_masks = &hdrs[cmd].masks;
3047 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
3048 NL_SET_ERR_MSG_MOD(extack,
3049 "attempt to offload an unsupported field");
3050 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
3051 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
3052 16, 1, cmd_masks, sizeof(zero_masks), true);
3053 err = -EOPNOTSUPP;
3054 goto out_dealloc_parsed_actions;
3055 }
3056 }
3057
3058 return 0;
3059
3060out_dealloc_parsed_actions:
3061 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3062 return err;
3063}
3064
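/* The csum action is offloaded only as a by-product of a header rewrite
 * (pedit), and only for the IPv4/TCP/UDP checksum flags.
 */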
3065static bool csum_offload_supported(struct mlx5e_priv *priv,
3066 u32 action,
3067 u32 update_flags,
3068 struct netlink_ext_ack *extack)
3069{
3070 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
3071 TCA_CSUM_UPDATE_FLAG_UDP;
3072
3073 /* The HW recalcs checksums only if re-writing headers */
3074 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
3075 NL_SET_ERR_MSG_MOD(extack,
3076 "TC csum action is only offloaded with pedit");
3077 netdev_warn(priv->netdev,
3078 "TC csum action is only offloaded with pedit\n");
3079 return false;
3080 }
3081
3082 if (update_flags & ~prot_flags) {
3083 NL_SET_ERR_MSG_MOD(extack,
3084 "can't offload TC csum action for some header/s");
3085 netdev_warn(priv->netdev,
3086 "can't offload TC csum action for some header/s - flags %#x\n",
3087 update_flags);
3088 return false;
3089 }
3090
3091 return true;
3092}
3093
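/* Overlays of the 4-byte words holding ttl (IPv4) and hop_limit (IPv6),
 * used to check whether a pedit mask touches the neighbouring fields.
 */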
3094struct ip_ttl_word {
3095 __u8 ttl;
3096 __u8 protocol;
3097 __sum16 check;
3098};
3099
3100struct ipv6_hoplimit_word {
3101 __be16 payload_len;
3102 __u8 nexthdr;
3103 __u8 hop_limit;
3104};
3105
3106static int is_action_keys_supported(const struct flow_action_entry *act,
3107 bool ct_flow, bool *modify_ip_header,
3108 bool *modify_tuple,
3109 struct netlink_ext_ack *extack)
3110{
3111 u32 mask, offset;
3112 u8 htype;
3113
3114 htype = act->mangle.htype;
3115 offset = act->mangle.offset;
3116 mask = ~act->mangle.mask;
3117 /* For IPv4 & IPv6 header check 4 byte word,
3118 * to determine that modified fields
3119 * are NOT ttl & hop_limit only.
3120 */
3121 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3122 struct ip_ttl_word *ttl_word =
3123 (struct ip_ttl_word *)&mask;
3124
3125 if (offset != offsetof(struct iphdr, ttl) ||
3126 ttl_word->protocol ||
3127 ttl_word->check) {
3128 *modify_ip_header = true;
3129 }
3130
3131 if (offset >= offsetof(struct iphdr, saddr))
3132 *modify_tuple = true;
3133
3134 if (ct_flow && *modify_tuple) {
3135 NL_SET_ERR_MSG_MOD(extack,
3136 "can't offload re-write of ipv4 address with action ct");
3137 return -EOPNOTSUPP;
3138 }
3139 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3140 struct ipv6_hoplimit_word *hoplimit_word =
3141 (struct ipv6_hoplimit_word *)&mask;
3142
3143 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3144 hoplimit_word->payload_len ||
3145 hoplimit_word->nexthdr) {
3146 *modify_ip_header = true;
3147 }
3148
3149 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3150 *modify_tuple = true;
3151
3152 if (ct_flow && *modify_tuple) {
3153 NL_SET_ERR_MSG_MOD(extack,
3154 "can't offload re-write of ipv6 address with action ct");
3155 return -EOPNOTSUPP;
3156 }
3157 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3158 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3159 *modify_tuple = true;
3160 if (ct_flow) {
3161 NL_SET_ERR_MSG_MOD(extack,
3162 "can't offload re-write of transport header ports with action ct");
3163 return -EOPNOTSUPP;
3164 }
3165 }
3166
3167 return 0;
3168}
3169
3170static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
3171 bool ct_flow, struct netlink_ext_ack *extack,
3172 struct mlx5e_priv *priv,
3173 struct mlx5_flow_spec *spec)
3174{
3175 if (!modify_tuple || ct_clear)
3176 return true;
3177
3178 if (ct_flow) {
3179 NL_SET_ERR_MSG_MOD(extack,
3180 "can't offload tuple modification with non-clear ct()");
3181 netdev_info(priv->netdev,
3182 "can't offload tuple modification with non-clear ct()");
3183 return false;
3184 }
3185
3186 /* Add ct_state=-trk match so it will be offloaded for non ct flows
3187 * (or after clear action), as otherwise, since the tuple is changed,
3188 * we can't restore ct state
3189 */
3190 if (mlx5_tc_ct_add_no_trk_match(spec)) {
3191 NL_SET_ERR_MSG_MOD(extack,
3192 "can't offload tuple modification with ct matches and no ct(clear) action");
3193 netdev_info(priv->netdev,
3194 "can't offload tuple modification with ct matches and no ct(clear) action");
3195 return false;
3196 }
3197
3198 return true;
3199}
3200
3201static bool modify_header_match_supported(struct mlx5e_priv *priv,
3202 struct mlx5_flow_spec *spec,
3203 struct flow_action *flow_action,
3204 u32 actions, bool ct_flow,
3205 bool ct_clear,
3206 struct netlink_ext_ack *extack)
3207{
3208 const struct flow_action_entry *act;
3209 bool modify_ip_header, modify_tuple;
3210 void *headers_c;
3211 void *headers_v;
3212 u16 ethertype;
3213 u8 ip_proto;
3214 int i, err;
3215
3216 headers_c = get_match_headers_criteria(actions, spec);
3217 headers_v = get_match_headers_value(actions, spec);
3218 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3219
3220 /* for non-IP we only re-write MACs, so we can okay anything we need */
3221 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3222 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3223 goto out_ok;
3224
3225 modify_ip_header = false;
3226 modify_tuple = false;
3227 flow_action_for_each(i, act, flow_action) {
3228 if (act->id != FLOW_ACTION_MANGLE &&
3229 act->id != FLOW_ACTION_ADD)
3230 continue;
3231
3232 err = is_action_keys_supported(act, ct_flow,
3233 &modify_ip_header,
3234 &modify_tuple, extack);
3235 if (err)
3236 return err;
3237 }
3238
3239 if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
3240 priv, spec))
3241 return false;
3242
3243 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3244 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3245 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3246 NL_SET_ERR_MSG_MOD(extack,
3247 "can't offload re-write of non TCP/UDP");
3248 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3249 ip_proto);
3250 return false;
3251 }
3252
3253out_ok:
3254 return true;
3255}
3256
3257static bool actions_match_supported(struct mlx5e_priv *priv,
3258 struct flow_action *flow_action,
3259 struct mlx5e_tc_flow_parse_attr *parse_attr,
3260 struct mlx5e_tc_flow *flow,
3261 struct netlink_ext_ack *extack)
3262{
3263 bool ct_flow = false, ct_clear = false;
3264 u32 actions;
3265
3266 ct_clear = flow->attr->ct_attr.ct_action &
3267 TCA_CT_ACT_CLEAR;
3268 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3269 actions = flow->attr->action;
3270
3271 if (mlx5e_is_eswitch_flow(flow)) {
3272 if (flow->attr->esw_attr->split_count && ct_flow &&
3273 !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
3274 /* All registers used by ct are cleared when using
3275 * split rules.
3276 */
3277 NL_SET_ERR_MSG_MOD(extack,
3278 "Can't offload mirroring with action ct");
3279 return false;
3280 }
3281 }
3282
3283 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3284 return modify_header_match_supported(priv, &parse_attr->spec,
3285 flow_action, actions,
3286 ct_flow, ct_clear,
3287 extack);
3288
3289 return true;
3290}
3291
3292static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3293{
3294 return priv->mdev == peer_priv->mdev;
3295}
3296
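/* Two netdevs belong to the same HW if their mlx5 devices report the same
 * system image GUID (e.g. different ports or PFs of one NIC).
 */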
3297static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3298{
3299 struct mlx5_core_dev *fmdev, *pmdev;
3300 u64 fsystem_guid, psystem_guid;
3301
3302 fmdev = priv->mdev;
3303 pmdev = peer_priv->mdev;
3304
3305 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3306 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3307
3308 return (fsystem_guid == psystem_guid);
3309}
3310
3311static bool same_vf_reps(struct mlx5e_priv *priv,
3312 struct net_device *out_dev)
3313{
3314 return mlx5e_eswitch_vf_rep(priv->netdev) &&
3315 priv->netdev == out_dev;
3316}
3317
3318static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3319 const struct flow_action_entry *act,
3320 struct mlx5e_tc_flow_parse_attr *parse_attr,
3321 struct pedit_headers_action *hdrs,
3322 u32 *action, struct netlink_ext_ack *extack)
3323{
3324 u16 mask16 = VLAN_VID_MASK;
3325 u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3326 const struct flow_action_entry pedit_act = {
3327 .id = FLOW_ACTION_MANGLE,
3328 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3329 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3330 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3331 .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3332 };
3333 u8 match_prio_mask, match_prio_val;
3334 void *headers_c, *headers_v;
3335 int err;
3336
3337 headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3338 headers_v = get_match_headers_value(*action, &parse_attr->spec);
3339
3340 if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3341 MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3342 NL_SET_ERR_MSG_MOD(extack,
3343 "VLAN rewrite action must have VLAN protocol match");
3344 return -EOPNOTSUPP;
3345 }
3346
3347 match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
3348 match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
3349 if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
3350 NL_SET_ERR_MSG_MOD(extack,
3351 "Changing VLAN prio is not supported");
3352 return -EOPNOTSUPP;
3353 }
3354
3355 err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
3356 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3357
3358 return err;
3359}
3360
3361static int
3362add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
3363 struct mlx5e_tc_flow_parse_attr *parse_attr,
3364 struct pedit_headers_action *hdrs,
3365 u32 *action, struct netlink_ext_ack *extack)
3366{
3367 const struct flow_action_entry prio_tag_act = {
3368 .vlan.vid = 0,
3369 .vlan.prio =
3370 MLX5_GET(fte_match_set_lyr_2_4,
3371 get_match_headers_value(*action,
3372 &parse_attr->spec),
3373 first_prio) &
3374 MLX5_GET(fte_match_set_lyr_2_4,
3375 get_match_headers_criteria(*action,
3376 &parse_attr->spec),
3377 first_prio),
3378 };
3379
3380 return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3381 &prio_tag_act, parse_attr, hdrs, action,
3382 extack);
3383}
3384
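/* A goto chain action is allowed only within the supported chain range,
 * only forward unless backward jumps are supported, and not combined with
 * reformat/decap unless the device can reformat and forward to a table.
 */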
3385static int validate_goto_chain(struct mlx5e_priv *priv,
3386 struct mlx5e_tc_flow *flow,
3387 const struct flow_action_entry *act,
3388 u32 actions,
3389 struct netlink_ext_ack *extack)
3390{
3391 bool is_esw = mlx5e_is_eswitch_flow(flow);
3392 struct mlx5_flow_attr *attr = flow->attr;
3393 bool ft_flow = mlx5e_is_ft_flow(flow);
3394 u32 dest_chain = act->chain_index;
3395 struct mlx5_fs_chains *chains;
3396 struct mlx5_eswitch *esw;
3397 u32 reformat_and_fwd;
3398 u32 max_chain;
3399
3400 esw = priv->mdev->priv.eswitch;
3401 chains = is_esw ? esw_chains(esw) : nic_chains(priv);
3402 max_chain = mlx5_chains_get_chain_range(chains);
3403 reformat_and_fwd = is_esw ?
3404 MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
3405 MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
3406
3407 if (ft_flow) {
3408 NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
3409 return -EOPNOTSUPP;
3410 }
3411
3412 if (!mlx5_chains_backwards_supported(chains) &&
3413 dest_chain <= attr->chain) {
3414 NL_SET_ERR_MSG_MOD(extack,
3415 "Goto lower numbered chain isn't supported");
3416 return -EOPNOTSUPP;
3417 }
3418
3419 if (dest_chain > max_chain) {
3420 NL_SET_ERR_MSG_MOD(extack,
3421 "Requested destination chain is out of supported range");
3422 return -EOPNOTSUPP;
3423 }
3424
3425 if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
3426 MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
3427 !reformat_and_fwd) {
3428 NL_SET_ERR_MSG_MOD(extack,
3429 "Goto chain is not allowed if action has reformat or decap");
3430 return -EOPNOTSUPP;
3431 }
3432
3433 return 0;
3434}
3435
3436static int parse_tc_nic_actions(struct mlx5e_priv *priv,
3437 struct flow_action *flow_action,
3438 struct mlx5e_tc_flow_parse_attr *parse_attr,
3439 struct mlx5e_tc_flow *flow,
3440 struct netlink_ext_ack *extack)
3441{
3442 struct mlx5_flow_attr *attr = flow->attr;
3443 struct pedit_headers_action hdrs[2] = {};
3444 const struct flow_action_entry *act;
3445 struct mlx5_nic_flow_attr *nic_attr;
3446 u32 action = 0;
3447 int err, i;
3448
3449 if (!flow_action_has_entries(flow_action))
3450 return -EINVAL;
3451
3452 if (!flow_action_hw_stats_check(flow_action, extack,
3453 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3454 return -EOPNOTSUPP;
3455
3456 nic_attr = attr->nic_attr;
3457
3458 nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3459
3460 flow_action_for_each(i, act, flow_action) {
3461 switch (act->id) {
3462 case FLOW_ACTION_ACCEPT:
3463 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3464 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3465 break;
3466 case FLOW_ACTION_DROP:
3467 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3468 if (MLX5_CAP_FLOWTABLE(priv->mdev,
3469 flow_table_properties_nic_receive.flow_counter))
3470 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3471 break;
3472 case FLOW_ACTION_MANGLE:
3473 case FLOW_ACTION_ADD:
3474 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3475 parse_attr, hdrs, NULL, extack);
3476 if (err)
3477 return err;
3478
3479 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3480 break;
3481 case FLOW_ACTION_VLAN_MANGLE:
3482 err = add_vlan_rewrite_action(priv,
3483 MLX5_FLOW_NAMESPACE_KERNEL,
3484 act, parse_attr, hdrs,
3485 &action, extack);
3486 if (err)
3487 return err;
3488
3489 break;
3490 case FLOW_ACTION_CSUM:
3491 if (csum_offload_supported(priv, action,
3492 act->csum_flags,
3493 extack))
3494 break;
3495
3496 return -EOPNOTSUPP;
3497 case FLOW_ACTION_REDIRECT: {
3498 struct net_device *peer_dev = act->dev;
3499
3500 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
3501 same_hw_devs(priv, netdev_priv(peer_dev))) {
3502 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3503 flow_flag_set(flow, HAIRPIN);
3504 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3505 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3506 } else {
3507 NL_SET_ERR_MSG_MOD(extack,
3508 "device is not on same HW, can't offload");
3509 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
3510 peer_dev->name);
3511 return -EINVAL;
3512 }
3513 }
3514 break;
3515 case FLOW_ACTION_MARK: {
3516 u32 mark = act->mark;
3517
3518 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3519 NL_SET_ERR_MSG_MOD(extack,
3520 "Bad flow mark - only 16 bit is supported");
3521 return -EINVAL;
3522 }
3523
3524 nic_attr->flow_tag = mark;
3525 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3526 }
3527 break;
3528 case FLOW_ACTION_GOTO:
3529 err = validate_goto_chain(priv, flow, act, action,
3530 extack);
3531 if (err)
3532 return err;
3533
3534 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3535 attr->dest_chain = act->chain_index;
3536 break;
3537 case FLOW_ACTION_CT:
3538 err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
3539 &parse_attr->mod_hdr_acts,
3540 act, extack);
3541 if (err)
3542 return err;
3543
3544 flow_flag_set(flow, CT);
3545 break;
3546 default:
3547 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3548 return -EOPNOTSUPP;
3549 }
3550 }
3551
3552 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3553 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3554 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
3555 parse_attr, hdrs, &action, extack);
3556 if (err)
3557 return err;
3558 /* in case all pedit actions are skipped, remove the MOD_HDR
3559 * flag.
3560 */
3561 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3562 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3563 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3564 }
3565 }
3566
3567 attr->action = action;
3568
3569 if (attr->dest_chain) {
3570 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
3571 NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
3572 return -EOPNOTSUPP;
3573 }
3574 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3575 }
3576
3577 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3578 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3579
3580 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3581 return -EOPNOTSUPP;
3582
3583 return 0;
3584}
3585
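/* Forwarding to another VF representor is allowed when the merged_eswitch
 * capability is set and both representors belong to the same HW.
 */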
3586static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3587 struct net_device *peer_netdev)
3588{
3589 struct mlx5e_priv *peer_priv;
3590
3591 peer_priv = netdev_priv(peer_netdev);
3592
3593 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3594 mlx5e_eswitch_vf_rep(priv->netdev) &&
3595 mlx5e_eswitch_vf_rep(peer_netdev) &&
3596 same_hw_devs(priv, peer_priv));
3597}
3598
3599static int parse_tc_vlan_action(struct mlx5e_priv *priv,
3600 const struct flow_action_entry *act,
3601 struct mlx5_esw_flow_attr *attr,
3602 u32 *action)
3603{
3604 u8 vlan_idx = attr->total_vlan;
3605
3606 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
3607 return -EOPNOTSUPP;
3608
3609 switch (act->id) {
3610 case FLOW_ACTION_VLAN_POP:
3611 if (vlan_idx) {
3612 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3613 MLX5_FS_VLAN_DEPTH))
3614 return -EOPNOTSUPP;
3615
3616 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3617 } else {
3618 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3619 }
3620 break;
3621 case FLOW_ACTION_VLAN_PUSH:
3622 attr->vlan_vid[vlan_idx] = act->vlan.vid;
3623 attr->vlan_prio[vlan_idx] = act->vlan.prio;
3624 attr->vlan_proto[vlan_idx] = act->vlan.proto;
3625 if (!attr->vlan_proto[vlan_idx])
3626 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3627
3628 if (vlan_idx) {
3629 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3630 MLX5_FS_VLAN_DEPTH))
3631 return -EOPNOTSUPP;
3632
3633 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3634 } else {
3635 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
3636 (act->vlan.proto != htons(ETH_P_8021Q) ||
3637 act->vlan.prio))
3638 return -EOPNOTSUPP;
3639
3640 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
3641 }
3642 break;
3643 default:
3644 return -EINVAL;
3645 }
3646
3647 attr->total_vlan = vlan_idx + 1;
3648
3649 return 0;
3650}
3651
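/* Resolve the actual FDB destination when the requested output device is
 * a bond/LAG: the bond above the uplink maps to the uplink itself, and a
 * bond of representors maps to its currently active slave.
 */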
3652static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
3653 struct net_device *out_dev)
3654{
3655 struct net_device *fdb_out_dev = out_dev;
3656 struct net_device *uplink_upper;
3657
3658 rcu_read_lock();
3659 uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
3660 if (uplink_upper && netif_is_lag_master(uplink_upper) &&
3661 uplink_upper == out_dev) {
3662 fdb_out_dev = uplink_dev;
3663 } else if (netif_is_lag_master(out_dev)) {
3664 fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
3665 if (fdb_out_dev &&
3666 (!mlx5e_eswitch_rep(fdb_out_dev) ||
3667 !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
3668 fdb_out_dev = NULL;
3669 }
3670 rcu_read_unlock();
3671 return fdb_out_dev;
3672}
3673
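/* The output device is a VLAN device: emit a VLAN push per stacking level
 * and walk down to the underlying real device.
 */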
3674static int add_vlan_push_action(struct mlx5e_priv *priv,
3675 struct mlx5_flow_attr *attr,
3676 struct net_device **out_dev,
3677 u32 *action)
3678{
3679 struct net_device *vlan_dev = *out_dev;
3680 struct flow_action_entry vlan_act = {
3681 .id = FLOW_ACTION_VLAN_PUSH,
3682 .vlan.vid = vlan_dev_vlan_id(vlan_dev),
3683 .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3684 .vlan.prio = 0,
3685 };
3686 int err;
3687
3688 err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
3689 if (err)
3690 return err;
3691
3692 rcu_read_lock();
3693 *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
3694 rcu_read_unlock();
3695 if (!*out_dev)
3696 return -ENODEV;
3697
3698 if (is_vlan_dev(*out_dev))
3699 err = add_vlan_push_action(priv, attr, out_dev, action);
3700
3701 return err;
3702}
3703
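/* The filter device is stacked (e.g. a VLAN device) on top of the
 * representor: emit one VLAN pop per stacking level between the two.
 */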
3704static int add_vlan_pop_action(struct mlx5e_priv *priv,
3705 struct mlx5_flow_attr *attr,
3706 u32 *action)
3707{
3708 struct flow_action_entry vlan_act = {
3709 .id = FLOW_ACTION_VLAN_POP,
3710 };
3711 int nest_level, err = 0;
3712
3713 nest_level = attr->parse_attr->filter_dev->lower_level -
3714 priv->netdev->lower_level;
3715 while (nest_level--) {
3716 err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
3717 if (err)
3718 return err;
3719 }
3720
3721 return err;
3722}
3723
3724static bool same_hw_reps(struct mlx5e_priv *priv,
3725 struct net_device *peer_netdev)
3726{
3727 struct mlx5e_priv *peer_priv;
3728
3729 peer_priv = netdev_priv(peer_netdev);
3730
3731 return mlx5e_eswitch_rep(priv->netdev) &&
3732 mlx5e_eswitch_rep(peer_netdev) &&
3733 same_hw_devs(priv, peer_priv);
3734}
3735
3736static bool is_lag_dev(struct mlx5e_priv *priv,
3737 struct net_device *peer_netdev)
3738{
3739 return ((mlx5_lag_is_sriov(priv->mdev) ||
3740 mlx5_lag_is_multipath(priv->mdev)) &&
3741 same_hw_reps(priv, peer_netdev));
3742}
3743
3744bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3745 struct net_device *out_dev)
3746{
3747 if (is_merged_eswitch_vfs(priv, out_dev))
3748 return true;
3749
3750 if (is_lag_dev(priv, out_dev))
3751 return true;
3752
3753 return mlx5e_eswitch_rep(out_dev) &&
3754 same_port_devs(priv, netdev_priv(out_dev));
3755}
3756
3757static bool is_duplicated_output_device(struct net_device *dev,
3758 struct net_device *out_dev,
3759 int *ifindexes, int if_count,
3760 struct netlink_ext_ack *extack)
3761{
3762 int i;
3763
3764 for (i = 0; i < if_count; i++) {
3765 if (ifindexes[i] == out_dev->ifindex) {
3766 NL_SET_ERR_MSG_MOD(extack,
3767 "can't duplicate output to same device");
3768 netdev_err(dev, "can't duplicate output to same device: %s\n",
3769 out_dev->name);
3770 return true;
3771 }
3772 }
3773
3774 return false;
3775}
3776
3777static int verify_uplink_forwarding(struct mlx5e_priv *priv,
3778 struct mlx5e_tc_flow *flow,
3779 struct net_device *out_dev,
3780 struct netlink_ext_ack *extack)
3781{
3782 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
3783 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3784 struct mlx5e_rep_priv *rep_priv;
3785
3786 /* Forwarding non encapsulated traffic between
3787 * uplink ports is allowed only if
3788 * termination_table_raw_traffic cap is set.
3789 *
3790 * Input vport was stored attr->in_rep.
3791 * In LAG case, *priv* is the private data of
3792 * uplink which may be not the input vport.
3793 */
3794 rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
3795
3796 if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
3797 mlx5e_eswitch_uplink_rep(out_dev)))
3798 return 0;
3799
3800 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
3801 termination_table_raw_traffic)) {
3802 NL_SET_ERR_MSG_MOD(extack,
3803 "devices are both uplink, can't offload forwarding");
3804 return -EOPNOTSUPP;
3805 } else if (out_dev != rep_priv->netdev) {
3806 NL_SET_ERR_MSG_MOD(extack,
3807 "devices are not the same uplink, can't offload forwarding");
3808 return -EOPNOTSUPP;
3809 }
3810 return 0;
3811}
3812
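/* Redirecting to an OVS internal port is implemented by writing the
 * internal port's metadata into the source-port register and sending the
 * packet back to the root FDB table for another lookup.
 */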
3813int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
3814 struct mlx5_flow_attr *attr,
3815 int ifindex,
3816 enum mlx5e_tc_int_port_type type,
3817 u32 *action,
3818 int out_index)
3819{
3820 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
3821 struct mlx5e_tc_int_port_priv *int_port_priv;
3822 struct mlx5e_tc_flow_parse_attr *parse_attr;
3823 struct mlx5e_tc_int_port *dest_int_port;
3824 int err;
3825
3826 parse_attr = attr->parse_attr;
3827 int_port_priv = mlx5e_get_int_port_priv(priv);
3828
3829 dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
3830 if (IS_ERR(dest_int_port))
3831 return PTR_ERR(dest_int_port);
3832
3833 err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
3834 MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
3835 mlx5e_tc_int_port_get_metadata(dest_int_port));
3836 if (err) {
3837 mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
3838 return err;
3839 }
3840
3841 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3842
3843 esw_attr->dest_int_port = dest_int_port;
3844 esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
3845
3846 /* Forward to root fdb for matching against the new source vport */
3847 attr->dest_chain = 0;
3848
3849 return 0;
3850}
3851
3852static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
3853 struct flow_action *flow_action,
3854 struct mlx5e_tc_flow *flow,
3855 struct netlink_ext_ack *extack,
3856 struct net_device *filter_dev)
3857{
3858 struct pedit_headers_action hdrs[2] = {};
3859 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3860 struct mlx5e_tc_flow_parse_attr *parse_attr;
3861 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3862 struct mlx5e_sample_attr sample_attr = {};
3863 const struct ip_tunnel_info *info = NULL;
3864 struct mlx5_flow_attr *attr = flow->attr;
3865 int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
3866 bool ft_flow = mlx5e_is_ft_flow(flow);
3867 const struct flow_action_entry *act;
3868 struct mlx5_esw_flow_attr *esw_attr;
3869 bool encap = false, decap = false;
3870 u32 action = attr->action;
3871 int err, i, if_count = 0;
3872 bool ptype_host = false;
3873 bool mpls_push = false;
3874
3875 if (!flow_action_has_entries(flow_action))
3876 return -EINVAL;
3877
3878 if (!flow_action_hw_stats_check(flow_action, extack,
3879 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3880 return -EOPNOTSUPP;
3881
3882 esw_attr = attr->esw_attr;
3883 parse_attr = attr->parse_attr;
3884
3885 flow_action_for_each(i, act, flow_action) {
3886 switch (act->id) {
3887 case FLOW_ACTION_PTYPE:
3888 if (act->ptype != PACKET_HOST) {
3889 NL_SET_ERR_MSG_MOD(extack,
3890 "skbedit ptype is only supported with type host");
3891 return -EOPNOTSUPP;
3892 }
3893
3894 ptype_host = true;
3895 break;
3896 case FLOW_ACTION_DROP:
3897 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
3898 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3899 break;
3900 case FLOW_ACTION_TRAP:
3901 if (!flow_offload_has_one_action(flow_action)) {
3902 NL_SET_ERR_MSG_MOD(extack,
3903 "action trap is supported as a sole action only");
3904 return -EOPNOTSUPP;
3905 }
3906 action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3907 MLX5_FLOW_CONTEXT_ACTION_COUNT);
3908 attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
3909 break;
3910 case FLOW_ACTION_MPLS_PUSH:
3911 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
3912 reformat_l2_to_l3_tunnel) ||
3913 act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
3914 NL_SET_ERR_MSG_MOD(extack,
3915 "mpls push is supported only for mpls_uc protocol");
3916 return -EOPNOTSUPP;
3917 }
3918 mpls_push = true;
3919 break;
3920 case FLOW_ACTION_MPLS_POP:
3921 /* MPLS pop is supported only as the first action and only when
3922  * the filter net device is a bareudp device. Subsequent actions
3923  * can be pedit, and the last action can be a mirred egress
3924  * redirect.
3925  */
3926 if (i) {
3927 NL_SET_ERR_MSG_MOD(extack,
3928 "mpls pop supported only as first action");
3929 return -EOPNOTSUPP;
3930 }
3931 if (!netif_is_bareudp(filter_dev)) {
3932 NL_SET_ERR_MSG_MOD(extack,
3933 "mpls pop supported only on bareudp devices");
3934 return -EOPNOTSUPP;
3935 }
3936
3937 parse_attr->eth.h_proto = act->mpls_pop.proto;
3938 action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
3939 flow_flag_set(flow, L3_TO_L2_DECAP);
3940 break;
3941 case FLOW_ACTION_MANGLE:
3942 case FLOW_ACTION_ADD:
3943 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3944 parse_attr, hdrs, flow, extack);
3945 if (err)
3946 return err;
3947
3948 if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
3949 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3950 esw_attr->split_count = esw_attr->out_count;
3951 }
3952 break;
3953 case FLOW_ACTION_CSUM:
3954 if (csum_offload_supported(priv, action,
3955 act->csum_flags, extack))
3956 break;
3957
3958 return -EOPNOTSUPP;
3959 case FLOW_ACTION_REDIRECT_INGRESS: {
3960 struct net_device *out_dev;
3961
3962 out_dev = act->dev;
3963 if (!out_dev)
3964 return -EOPNOTSUPP;
3965
3966 if (!netif_is_ovs_master(out_dev)) {
3967 NL_SET_ERR_MSG_MOD(extack,
3968 "redirect to ingress is supported only for OVS internal ports");
3969 return -EOPNOTSUPP;
3970 }
3971
3972 if (netif_is_ovs_master(parse_attr->filter_dev)) {
3973 NL_SET_ERR_MSG_MOD(extack,
3974 "redirect to ingress is not supported from internal port");
3975 return -EOPNOTSUPP;
3976 }
3977
3978 if (!ptype_host) {
3979 NL_SET_ERR_MSG_MOD(extack,
3980 "redirect to int port ingress requires ptype=host action");
3981 return -EOPNOTSUPP;
3982 }
3983
3984 if (esw_attr->out_count) {
3985 NL_SET_ERR_MSG_MOD(extack,
3986 "redirect to int port ingress is supported only as single destination");
3987 return -EOPNOTSUPP;
3988 }
3989
3990 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3991 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3992
3993 err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
3994 MLX5E_TC_INT_PORT_INGRESS,
3995 &action, esw_attr->out_count);
3996 if (err)
3997 return err;
3998
3999 esw_attr->out_count++;
4000
4001 break;
4002 }
4003 case FLOW_ACTION_REDIRECT:
4004 case FLOW_ACTION_MIRRED: {
4005 struct mlx5e_priv *out_priv;
4006 struct net_device *out_dev;
4007
4008 out_dev = act->dev;
4009 if (!out_dev) {
4010 /* out_dev is NULL when filters that reference a
4011  * non-existing mirred device are replayed to the
4012  * driver, so fail the rule.
4013  */
4014 return -EINVAL;
4015 }
4016
4017 if (mpls_push && !netif_is_bareudp(out_dev)) {
4018 NL_SET_ERR_MSG_MOD(extack,
4019 "mpls is supported only through a bareudp device");
4020 return -EOPNOTSUPP;
4021 }
4022
4023 if (ft_flow && out_dev == priv->netdev) {
4024 /* Ignore forward-to-self rules generated by adding
4025  * both mlx5 devices to the flow table common group
4026  * in the fast path FT.
4027  */
4028 return -EOPNOTSUPP;
4029 }
4030
4031 if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
4032 NL_SET_ERR_MSG_MOD(extack,
4033 "can't support more output ports, can't offload forwarding");
4034 netdev_warn(priv->netdev,
4035 "can't support more than %d output ports, can't offload forwarding\n",
4036 esw_attr->out_count);
4037 return -EOPNOTSUPP;
4038 }
4039
4040 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
4041 MLX5_FLOW_CONTEXT_ACTION_COUNT;
4042 if (encap) {
4043 parse_attr->mirred_ifindex[esw_attr->out_count] =
4044 out_dev->ifindex;
4045 parse_attr->tun_info[esw_attr->out_count] =
4046 mlx5e_dup_tun_info(info);
4047 if (!parse_attr->tun_info[esw_attr->out_count])
4048 return -ENOMEM;
4049 encap = false;
4050 esw_attr->dests[esw_attr->out_count].flags |=
4051 MLX5_ESW_DEST_ENCAP;
4052 esw_attr->out_count++;
4053 /* attr->dests[].rep is resolved later, when the
4054  * encap entry is handled.
4055  */
4056 } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
4057 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4058 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
4059
4060 if (is_duplicated_output_device(priv->netdev,
4061 out_dev,
4062 ifindexes,
4063 if_count,
4064 extack))
4065 return -EOPNOTSUPP;
4066
4067 ifindexes[if_count] = out_dev->ifindex;
4068 if_count++;
4069
4070 out_dev = get_fdb_out_dev(uplink_dev, out_dev);
4071 if (!out_dev)
4072 return -ENODEV;
4073
4074 if (is_vlan_dev(out_dev)) {
4075 err = add_vlan_push_action(priv, attr,
4076 &out_dev,
4077 &action);
4078 if (err)
4079 return err;
4080 }
4081
4082 if (is_vlan_dev(parse_attr->filter_dev)) {
4083 err = add_vlan_pop_action(priv, attr,
4084 &action);
4085 if (err)
4086 return err;
4087 }
4088
4089 err = verify_uplink_forwarding(priv, flow, out_dev, extack);
4090 if (err)
4091 return err;
4092
4093 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
4094 NL_SET_ERR_MSG_MOD(extack,
4095 "devices are not on same switch HW, can't offload forwarding");
4096 return -EOPNOTSUPP;
4097 }
4098
4099 if (same_vf_reps(priv, out_dev)) {
4100 NL_SET_ERR_MSG_MOD(extack,
4101 "can't forward from a VF to itself");
4102 return -EOPNOTSUPP;
4103 }
4104
4105 out_priv = netdev_priv(out_dev);
4106 rpriv = out_priv->ppriv;
4107 esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
4108 esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
4109 esw_attr->out_count++;
4110 } else if (netif_is_ovs_master(out_dev)) {
4111 err = mlx5e_set_fwd_to_int_port_actions(priv, attr,
4112 out_dev->ifindex,
4113 MLX5E_TC_INT_PORT_EGRESS,
4114 &action,
4115 esw_attr->out_count);
4116 if (err)
4117 return err;
4118
4119 esw_attr->out_count++;
4120 } else if (parse_attr->filter_dev != priv->netdev) {
4121 /* All mlx5 devices are called to configure high
4122  * level device filters. Therefore, the *attempt* to
4123  * install a filter on an invalid eswitch should not
4124  * trigger an explicit error.
4125  */
4126 return -EINVAL;
4127 } else {
4128 NL_SET_ERR_MSG_MOD(extack,
4129 "devices are not on same switch HW, can't offload forwarding");
4130 return -EINVAL;
4131 }
4132 }
4133 break;
4134 case FLOW_ACTION_TUNNEL_ENCAP:
4135 info = act->tunnel;
4136 if (info)
4137 encap = true;
4138 else
4139 return -EOPNOTSUPP;
4140
4141 break;
4142 case FLOW_ACTION_VLAN_PUSH:
4143 case FLOW_ACTION_VLAN_POP:
4144 if (act->id == FLOW_ACTION_VLAN_PUSH &&
4145 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
4146 /* Replace vlan pop+push with a vlan modify (rewrite) action */
4147 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
4148 err = add_vlan_rewrite_action(priv,
4149 MLX5_FLOW_NAMESPACE_FDB,
4150 act, parse_attr, hdrs,
4151 &action, extack);
4152 } else {
4153 err = parse_tc_vlan_action(priv, act, esw_attr, &action);
4154 }
4155 if (err)
4156 return err;
4157
4158 esw_attr->split_count = esw_attr->out_count;
4159 break;
4160 case FLOW_ACTION_VLAN_MANGLE:
4161 err = add_vlan_rewrite_action(priv,
4162 MLX5_FLOW_NAMESPACE_FDB,
4163 act, parse_attr, hdrs,
4164 &action, extack);
4165 if (err)
4166 return err;
4167
4168 esw_attr->split_count = esw_attr->out_count;
4169 break;
4170 case FLOW_ACTION_TUNNEL_DECAP:
4171 decap = true;
4172 break;
4173 case FLOW_ACTION_GOTO:
4174 err = validate_goto_chain(priv, flow, act, action,
4175 extack);
4176 if (err)
4177 return err;
4178
4179 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
4180 attr->dest_chain = act->chain_index;
4181 break;
4182 case FLOW_ACTION_CT:
4183 if (flow_flag_test(flow, SAMPLE)) {
4184 NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
4185 return -EOPNOTSUPP;
4186 }
4187 err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
4188 &parse_attr->mod_hdr_acts,
4189 act, extack);
4190 if (err)
4191 return err;
4192
4193 flow_flag_set(flow, CT);
4194 esw_attr->split_count = esw_attr->out_count;
4195 break;
4196 case FLOW_ACTION_SAMPLE:
4197 if (flow_flag_test(flow, CT)) {
4198 NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
4199 return -EOPNOTSUPP;
4200 }
4201 sample_attr.rate = act->sample.rate;
4202 sample_attr.group_num = act->sample.psample_group->group_num;
4203 if (act->sample.truncate)
4204 sample_attr.trunc_size = act->sample.trunc_size;
4205 flow_flag_set(flow, SAMPLE);
4206 break;
4207 default:
4208 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
4209 return -EOPNOTSUPP;
4210 }
4211 }
4212
4213 /* Forwarding to/from an internal port supports only a single destination */
4214 if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
4215 esw_attr->out_count > 1) {
4216 NL_SET_ERR_MSG_MOD(extack,
4217 "Rules with internal port can have only one destination");
4218 return -EOPNOTSUPP;
4219 }
4220
4221 /* Always record the IP version of the match; it is used for indirect table handling */
4222 attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
4223
4224 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
4225 action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
4226 /* In prio-tag mode, replace the vlan pop with a rewrite of the
4227  * vlan prio tag instead.
4228  */
4229 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
4230 err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
4231 &action, extack);
4232 if (err)
4233 return err;
4234 }
4235
4236 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
4237 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
4238 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
4239 parse_attr, hdrs, &action, extack);
4240 if (err)
4241 return err;
4242
4243 /* If all pedit actions were skipped, clear the MOD_HDR flag. split_count
4244  * may have been set by pedit or by vlan pop/push; if there is no pop/push
4245  * either, reset it too. */
4246 if (parse_attr->mod_hdr_acts.num_actions == 0) {
4247 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4248 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4249 if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
4250 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
4251 esw_attr->split_count = 0;
4252 }
4253 }
4254
4255 attr->action = action;
4256 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
4257 return -EOPNOTSUPP;
4258
4259 if (attr->dest_chain) {
4260 if (decap) {
4261 /* Decap with goto is not offloaded. It could be supported by
4262  * creating a mapping for the tunnel device only (without the
4263  * tunnel headers) and setting that tunnel id on this decap
4264  * flow.
4265  *
4266  * On restore (miss), the saved tunnel device would then be set
4267  * back on the skb.
4268  */
4269 NL_SET_ERR_MSG(extack,
4270 "Decap with goto isn't supported");
4271 netdev_warn(priv->netdev,
4272 "Decap with goto isn't supported");
4273 return -EOPNOTSUPP;
4274 }
4275
4276 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4277 }
4278
4279 if (!(attr->action &
4280 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
4281 NL_SET_ERR_MSG_MOD(extack,
4282 "Rule must have at least one forward/drop action");
4283 return -EOPNOTSUPP;
4284 }
4285
4286 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
4287 NL_SET_ERR_MSG_MOD(extack,
4288 "current firmware doesn't support split rule for port mirroring");
4289 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
4290 return -EOPNOTSUPP;
4291 }
4292
4293 /* Allocate the sample attribute only when there is a sample
4294  * action and no errors occurred while parsing.
4295  */
4296 if (flow_flag_test(flow, SAMPLE)) {
4297 attr->sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
4298 if (!attr->sample_attr)
4299 return -ENOMEM;
4300 *attr->sample_attr = sample_attr;
4301 }
4302
4303 return 0;
4304}
4305
4306static void get_flags(int flags, unsigned long *flow_flags)
4307{
4308 unsigned long __flow_flags = 0;
4309
4310 if (flags & MLX5_TC_FLAG(INGRESS))
4311 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4312 if (flags & MLX5_TC_FLAG(EGRESS))
4313 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4314
4315 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4316 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4317 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4318 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4319 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4320 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4321
4322 *flow_flags = __flow_flags;
4323}
4324
4325static const struct rhashtable_params tc_ht_params = {
4326 .head_offset = offsetof(struct mlx5e_tc_flow, node),
4327 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4328 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
4329 .automatic_shrinking = true,
4330};
4331
4332static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4333 unsigned long flags)
4334{
4335 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4336 struct mlx5e_rep_priv *uplink_rpriv;
4337
4338 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4339 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
4340 return &uplink_rpriv->uplink_priv.tc_ht;
4341 } else /* NIC offload */
4342 return &priv->fs.tc.ht;
4343}
4344
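/* A duplicate ("peer") rule is needed on the paired eswitch when the two
 * devices are in SR-IOV LAG or multipath mode and the rule either ingresses
 * from a non-uplink representor or performs packet reformat (encap).
 */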
4345static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4346{
4347 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4348 struct mlx5_flow_attr *attr = flow->attr;
4349 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4350 flow_flag_test(flow, INGRESS);
4351 bool act_is_encap = !!(attr->action &
4352 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4353 bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
4354 MLX5_DEVCOM_ESW_OFFLOADS);
4355
4356 if (!esw_paired)
4357 return false;
4358
4359 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4360 mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
4361 (is_rep_ingress || act_is_encap))
4362 return true;
4363
4364 return false;
4365}
4366
4367struct mlx5_flow_attr *
4368mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4369{
4370 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
4371 sizeof(struct mlx5_esw_flow_attr) :
4372 sizeof(struct mlx5_nic_flow_attr);
4373 struct mlx5_flow_attr *attr;
4374
4375 return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
4376}
4377
4378static int
4379mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4380 struct flow_cls_offload *f, unsigned long flow_flags,
4381 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4382 struct mlx5e_tc_flow **__flow)
4383{
4384 struct mlx5e_tc_flow_parse_attr *parse_attr;
4385 struct mlx5_flow_attr *attr;
4386 struct mlx5e_tc_flow *flow;
4387 int err = -ENOMEM;
4388 int out_index;
4389
4390 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4391 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4392 if (!parse_attr || !flow)
4393 goto err_free;
4394
4395 flow->flags = flow_flags;
4396 flow->cookie = f->cookie;
4397 flow->priv = priv;
4398
4399 attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
4400 if (!attr)
4401 goto err_free;
4402
4403 flow->attr = attr;
4404
4405 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4406 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4407 INIT_LIST_HEAD(&flow->hairpin);
4408 INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4409 refcount_set(&flow->refcnt, 1);
4410 init_completion(&flow->init_done);
4411 init_completion(&flow->del_hw_done);
4412
4413 *__flow = flow;
4414 *__parse_attr = parse_attr;
4415
4416 return 0;
4417
4418err_free:
4419 kfree(flow);
4420 kvfree(parse_attr);
4421 return err;
4422}
4423
4424static void
4425mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4426 struct mlx5e_tc_flow_parse_attr *parse_attr,
4427 struct flow_cls_offload *f)
4428{
4429 attr->parse_attr = parse_attr;
4430 attr->chain = f->common.chain_index;
4431 attr->prio = f->common.prio;
4432}
4433
4434static void
4435mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4436 struct mlx5e_priv *priv,
4437 struct mlx5e_tc_flow_parse_attr *parse_attr,
4438 struct flow_cls_offload *f,
4439 struct mlx5_eswitch_rep *in_rep,
4440 struct mlx5_core_dev *in_mdev)
4441{
4442 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4443 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4444
4445 mlx5e_flow_attr_init(attr, parse_attr, f);
4446
4447 esw_attr->in_rep = in_rep;
4448 esw_attr->in_mdev = in_mdev;
4449
4450 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4451 MLX5_COUNTER_SOURCE_ESWITCH)
4452 esw_attr->counter_dev = in_mdev;
4453 else
4454 esw_attr->counter_dev = priv->mdev;
4455}
4456
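/* Allocate and offload one FDB (eswitch) flow: parse the flower match and
 * the CT match, parse the actions, and add the rule to hardware. Under
 * multipath, a flow that fails with -ENETUNREACH is kept on the unready
 * list and retried later instead of being freed.
 */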
4457static struct mlx5e_tc_flow *
4458__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4459 struct flow_cls_offload *f,
4460 unsigned long flow_flags,
4461 struct net_device *filter_dev,
4462 struct mlx5_eswitch_rep *in_rep,
4463 struct mlx5_core_dev *in_mdev)
4464{
4465 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4466 struct netlink_ext_ack *extack = f->common.extack;
4467 struct mlx5e_tc_flow_parse_attr *parse_attr;
4468 struct mlx5e_tc_flow *flow;
4469 int attr_size, err;
4470
4471 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4472 attr_size = sizeof(struct mlx5_esw_flow_attr);
4473 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4474 &parse_attr, &flow);
4475 if (err)
4476 goto out;
4477
4478 parse_attr->filter_dev = filter_dev;
4479 mlx5e_flow_esw_attr_init(flow->attr,
4480 priv, parse_attr,
4481 f, in_rep, in_mdev);
4482
4483 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4484 f, filter_dev);
4485 if (err)
4486 goto err_free;
4487
4488 /* Actions validation depends on parsing the ct matches first */
4489 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4490 &flow->attr->ct_attr, extack);
4491 if (err)
4492 goto err_free;
4493
4494 err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
4495 if (err)
4496 goto err_free;
4497
4498 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
4499 complete_all(&flow->init_done);
4500 if (err) {
4501 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4502 goto err_free;
4503
4504 add_unready_flow(flow);
4505 }
4506
4507 return flow;
4508
4509err_free:
4510 mlx5e_flow_put(priv, flow);
4511out:
4512 return ERR_PTR(err);
4513}
4514
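/* Install a duplicate of @flow on the peer eswitch. The duplicate is linked
 * through flow->peer_flow and tracked on the eswitch peer_flows list so it
 * is torn down together with the original rule.
 */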
4515static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
4516 struct mlx5e_tc_flow *flow,
4517 unsigned long flow_flags)
4518{
4519 struct mlx5e_priv *priv = flow->priv, *peer_priv;
4520 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
4521 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4522 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4523 struct mlx5e_tc_flow_parse_attr *parse_attr;
4524 struct mlx5e_rep_priv *peer_urpriv;
4525 struct mlx5e_tc_flow *peer_flow;
4526 struct mlx5_core_dev *in_mdev;
4527 int err = 0;
4528
4529 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4530 if (!peer_esw)
4531 return -ENODEV;
4532
4533 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
4534 peer_priv = netdev_priv(peer_urpriv->netdev);
4535
4536 /* in_mdev is assigned to the device the packet originated from.
4537  * Packets redirected to the uplink therefore use the same mdev as
4538  * the original flow, while packets redirected from the uplink use
4539  * the peer mdev.
4540  */
4541 if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
4542 in_mdev = peer_priv->mdev;
4543 else
4544 in_mdev = priv->mdev;
4545
4546 parse_attr = flow->attr->parse_attr;
4547 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
4548 parse_attr->filter_dev,
4549 attr->in_rep, in_mdev);
4550 if (IS_ERR(peer_flow)) {
4551 err = PTR_ERR(peer_flow);
4552 goto out;
4553 }
4554
4555 flow->peer_flow = peer_flow;
4556 flow_flag_set(flow, DUP);
4557 mutex_lock(&esw->offloads.peer_mutex);
4558 list_add_tail(&flow->peer, &esw->offloads.peer_flows);
4559 mutex_unlock(&esw->offloads.peer_mutex);
4560
4561out:
4562 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4563 return err;
4564}
4565
4566static int
4567mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4568 struct flow_cls_offload *f,
4569 unsigned long flow_flags,
4570 struct net_device *filter_dev,
4571 struct mlx5e_tc_flow **__flow)
4572{
4573 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4574 struct mlx5_eswitch_rep *in_rep = rpriv->rep;
4575 struct mlx5_core_dev *in_mdev = priv->mdev;
4576 struct mlx5e_tc_flow *flow;
4577 int err;
4578
4579 flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
4580 in_mdev);
4581 if (IS_ERR(flow))
4582 return PTR_ERR(flow);
4583
4584 if (is_peer_flow_needed(flow)) {
4585 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
4586 if (err) {
4587 mlx5e_tc_del_fdb_flow(priv, flow);
4588 goto out;
4589 }
4590 }
4591
4592 *__flow = flow;
4593
4594 return 0;
4595
4596out:
4597 return err;
4598}
4599
4600static int
4601mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4602 struct flow_cls_offload *f,
4603 unsigned long flow_flags,
4604 struct net_device *filter_dev,
4605 struct mlx5e_tc_flow **__flow)
4606{
4607 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4608 struct netlink_ext_ack *extack = f->common.extack;
4609 struct mlx5e_tc_flow_parse_attr *parse_attr;
4610 struct mlx5e_tc_flow *flow;
4611 int attr_size, err;
4612
4613 if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
4614 if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
4615 return -EOPNOTSUPP;
4616 } else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
4617 return -EOPNOTSUPP;
4618 }
4619
4620 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4621 attr_size = sizeof(struct mlx5_nic_flow_attr);
4622 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4623 &parse_attr, &flow);
4624 if (err)
4625 goto out;
4626
4627 parse_attr->filter_dev = filter_dev;
4628 mlx5e_flow_attr_init(flow->attr, parse_attr, f);
4629
4630 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4631 f, filter_dev);
4632 if (err)
4633 goto err_free;
4634
4635 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4636 &flow->attr->ct_attr, extack);
4637 if (err)
4638 goto err_free;
4639
4640 err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
4641 if (err)
4642 goto err_free;
4643
4644 err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
4645 if (err)
4646 goto err_free;
4647
4648 flow_flag_set(flow, OFFLOADED);
4649 *__flow = flow;
4650
4651 return 0;
4652
4653err_free:
4654 flow_flag_set(flow, FAILED);
4655 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4656 mlx5e_flow_put(priv, flow);
4657out:
4658 return err;
4659}
4660
4661static int
4662mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4663 struct flow_cls_offload *f,
4664 unsigned long flags,
4665 struct net_device *filter_dev,
4666 struct mlx5e_tc_flow **flow)
4667{
4668 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4669 unsigned long flow_flags;
4670 int err;
4671
4672 get_flags(flags, &flow_flags);
4673
4674 if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4675 return -EOPNOTSUPP;
4676
4677 if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4678 err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4679 filter_dev, flow);
4680 else
4681 err = mlx5e_add_nic_flow(priv, f, flow_flags,
4682 filter_dev, flow);
4683
4684 return err;
4685}
4686
4687static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
4688 struct mlx5e_rep_priv *rpriv)
4689{
4690 /* An offloaded flow rule is allowed to duplicate on a non-uplink
4691  * representor sharing a tc block with other slaves of a lag device.
4692  * rpriv can be NULL when this function is called in NIC mode.
4693  */
4694 return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
4695}
4696
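/* FLOW_CLS_REPLACE entry point. Rejects duplicate cookies (unless the
 * duplicate comes from another bond slave sharing the tc block), offloads
 * the flow, and inserts it into the per-namespace hashtable keyed by the
 * flower cookie.
 */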
4697int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
4698 struct flow_cls_offload *f, unsigned long flags)
4699{
4700 struct netlink_ext_ack *extack = f->common.extack;
4701 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4702 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4703 struct mlx5e_tc_flow *flow;
4704 int err = 0;
4705
4706 if (!mlx5_esw_hold(priv->mdev))
4707 return -EAGAIN;
4708
4709 mlx5_esw_get(priv->mdev);
4710
4711 rcu_read_lock();
4712 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4713 if (flow) {
4714 /* The same flow rule was already offloaded to a non-uplink
4715  * representor sharing the tc block; nothing more to do.
4716  */
4717 if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
4718 goto rcu_unlock;
4719
4720 NL_SET_ERR_MSG_MOD(extack,
4721 "flow cookie already exists, ignoring");
4722 netdev_warn_once(priv->netdev,
4723 "flow cookie %lx already exists, ignoring\n",
4724 f->cookie);
4725 err = -EEXIST;
4726 goto rcu_unlock;
4727 }
4728rcu_unlock:
4729 rcu_read_unlock();
4730 if (flow)
4731 goto out;
4732
4733 trace_mlx5e_configure_flower(f);
4734 err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
4735 if (err)
4736 goto out;
4737
4738 /* Flow rule offloaded to a non-uplink representor sharing the
4739  * tc block: record the flow's owner device.
4740  */
4741 if (is_flow_rule_duplicate_allowed(dev, rpriv))
4742 flow->orig_dev = dev;
4743
4744 err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
4745 if (err)
4746 goto err_free;
4747
4748 mlx5_esw_release(priv->mdev);
4749 return 0;
4750
4751err_free:
4752 mlx5e_flow_put(priv, flow);
4753out:
4754 mlx5_esw_put(priv->mdev);
4755 mlx5_esw_release(priv->mdev);
4756 return err;
4757}
4758
4759static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
4760{
4761 bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
4762 bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
4763
4764 return flow_flag_test(flow, INGRESS) == dir_ingress &&
4765 flow_flag_test(flow, EGRESS) == dir_egress;
4766}
4767
4768int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
4769 struct flow_cls_offload *f, unsigned long flags)
4770{
4771 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4772 struct mlx5e_tc_flow *flow;
4773 int err;
4774
4775 rcu_read_lock();
4776 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4777 if (!flow || !same_flow_direction(flow, flags)) {
4778 err = -EINVAL;
4779 goto errout;
4780 }
4781
4782 /* Only delete the flow if the DELETED flag was not already set,
4783  * so a concurrent delete of the same cookie is rejected.
4784  */
4785 if (flow_flag_test_and_set(flow, DELETED)) {
4786 err = -EINVAL;
4787 goto errout;
4788 }
4789 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
4790 rcu_read_unlock();
4791
4792 trace_mlx5e_delete_flower(f);
4793 mlx5e_flow_put(priv, flow);
4794
4795 mlx5_esw_put(priv->mdev);
4796 return 0;
4797
4798errout:
4799 rcu_read_unlock();
4800 return err;
4801}
4802
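/* FLOW_CLS_STATS entry point. Reads the cached flow counter and, when a
 * duplicate rule is offloaded on the peer eswitch, folds the peer counter
 * in before reporting the totals back to tc.
 */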
4803int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
4804 struct flow_cls_offload *f, unsigned long flags)
4805{
4806 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4807 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4808 struct mlx5_eswitch *peer_esw;
4809 struct mlx5e_tc_flow *flow;
4810 struct mlx5_fc *counter;
4811 u64 lastuse = 0;
4812 u64 packets = 0;
4813 u64 bytes = 0;
4814 int err = 0;
4815
4816 rcu_read_lock();
4817 flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
4818 tc_ht_params));
4819 rcu_read_unlock();
4820 if (IS_ERR(flow))
4821 return PTR_ERR(flow);
4822
4823 if (!same_flow_direction(flow, flags)) {
4824 err = -EINVAL;
4825 goto errout;
4826 }
4827
4828 if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
4829 counter = mlx5e_tc_get_counter(flow);
4830 if (!counter)
4831 goto errout;
4832
4833 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
4834 }
4835
4836 /* Under multipath it is possible for one rule to be currently
4837  * un-offloaded while the peer rule is offloaded.
4838  */
4839 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4840 if (!peer_esw)
4841 goto out;
4842
4843 if (flow_flag_test(flow, DUP) &&
4844 flow_flag_test(flow->peer_flow, OFFLOADED)) {
4845 u64 bytes2;
4846 u64 packets2;
4847 u64 lastuse2;
4848
4849 counter = mlx5e_tc_get_counter(flow->peer_flow);
4850 if (!counter)
4851 goto no_peer_counter;
4852 mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
4853
4854 bytes += bytes2;
4855 packets += packets2;
4856 lastuse = max_t(u64, lastuse, lastuse2);
4857 }
4858
4859no_peer_counter:
4860 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4861out:
4862 flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
4863 FLOW_ACTION_HW_STATS_DELAYED);
4864 trace_mlx5e_stats_flower(f);
4865errout:
4866 mlx5e_flow_put(priv, flow);
4867 return err;
4868}
4869
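/* Apply a matchall police rate as an ingress rate limit on the vport QoS.
 * Only representors of vports connected to VFs are supported; a rate of 0
 * clears the limit.
 */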
4870static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
4871 struct netlink_ext_ack *extack)
4872{
4873 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4874 struct mlx5_eswitch *esw;
4875 u32 rate_mbps = 0;
4876 u16 vport_num;
4877 int err;
4878
4879 vport_num = rpriv->rep->vport;
4880 if (vport_num >= MLX5_VPORT_ECPF) {
4881 NL_SET_ERR_MSG_MOD(extack,
4882 "Ingress rate limit is supported only for Eswitch ports connected to VFs");
4883 return -EOPNOTSUPP;
4884 }
4885
4886 esw = priv->mdev->priv.eswitch;
4887 /* rate is given in bytes/sec.
4888  * First convert to bits/sec and then round to the nearest Mbit/sec:
4889  * add half a Mbit for rounding and divide by 10^6.
4890  * If the requested rate is non-zero, configure a minimum of
4891  * 1 Mbit/sec so the limit is never rounded down to zero.
4892  */
4893 if (rate) {
4894 rate = (rate * BITS_PER_BYTE) + 500000;
4895 do_div(rate, 1000000);
4896 rate_mbps = max_t(u32, rate, 1);
4897 }
4898
4899 err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
4900 if (err)
4901 NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
4902
4903 return err;
4904}
4905
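/* Validate and apply a matchall offload: exactly one action is allowed and
 * it must be a byte-rate police action. The current VF vport stats are
 * snapshotted as the baseline for later stats queries.
 */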
4906static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
4907 struct flow_action *flow_action,
4908 struct netlink_ext_ack *extack)
4909{
4910 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4911 const struct flow_action_entry *act;
4912 int err;
4913 int i;
4914
4915 if (!flow_action_has_entries(flow_action)) {
4916 NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
4917 return -EINVAL;
4918 }
4919
4920 if (!flow_offload_has_one_action(flow_action)) {
4921 NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
4922 return -EOPNOTSUPP;
4923 }
4924
4925 if (!flow_action_basic_hw_stats_check(flow_action, extack))
4926 return -EOPNOTSUPP;
4927
4928 flow_action_for_each(i, act, flow_action) {
4929 switch (act->id) {
4930 case FLOW_ACTION_POLICE:
4931 if (act->police.rate_pkt_ps) {
4932 NL_SET_ERR_MSG_MOD(extack, "QoS offload doesn't support packets-per-second policing");
4933 return -EOPNOTSUPP;
4934 }
4935 err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
4936 if (err)
4937 return err;
4938
4939 rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
4940 break;
4941 default:
4942 NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
4943 return -EOPNOTSUPP;
4944 }
4945 }
4946
4947 return 0;
4948}
4949
4950int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
4951 struct tc_cls_matchall_offload *ma)
4952{
4953 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4954 struct netlink_ext_ack *extack = ma->common.extack;
4955
4956 if (!mlx5_esw_qos_enabled(esw)) {
4957 NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
4958 return -EOPNOTSUPP;
4959 }
4960
4961 if (ma->common.prio != 1) {
4962 NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
4963 return -EINVAL;
4964 }
4965
4966 return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
4967}
4968
4969int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
4970 struct tc_cls_matchall_offload *ma)
4971{
4972 struct netlink_ext_ack *extack = ma->common.extack;
4973
4974 return apply_police_params(priv, 0, extack);
4975}
4976
4977void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
4978 struct tc_cls_matchall_offload *ma)
4979{
4980 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4981 struct rtnl_link_stats64 cur_stats;
4982 u64 dbytes;
4983 u64 dpkts;
4984
4985 cur_stats = priv->stats.vf_vport;
4986 dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
4987 dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
4988 rpriv->prev_vf_vport_stats = cur_stats;
4989 flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
4990 FLOW_ACTION_HW_STATS_DELAYED);
4991}
4992
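/* A peer mlx5e device on the same HW is going away: walk the hairpin table
 * and clear the dead-peer state on every hairpin pair that was created
 * against the peer's vhca id.
 */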
4993static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
4994 struct mlx5e_priv *peer_priv)
4995{
4996 struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
4997 struct mlx5e_hairpin_entry *hpe, *tmp;
4998 LIST_HEAD(init_wait_list);
4999 u16 peer_vhca_id;
5000 int bkt;
5001
5002 if (!same_hw_devs(priv, peer_priv))
5003 return;
5004
5005 peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
5006
5007 mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
5008 hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
5009 if (refcount_inc_not_zero(&hpe->refcnt))
5010 list_add(&hpe->dead_peer_wait_list, &init_wait_list);
5011 mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
5012
5013 list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
5014 wait_for_completion(&hpe->res_ready);
5015 if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
5016 mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
5017
5018 mlx5e_hairpin_put(priv, hpe);
5019 }
5020}
5021
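/* netdevice notifier: when another mlx5e netdev is unregistered, update the
 * hairpin entries that reference it (see mlx5e_tc_hairpin_update_dead_peer).
 */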
5022static int mlx5e_tc_netdev_event(struct notifier_block *this,
5023 unsigned long event, void *ptr)
5024{
5025 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
5026 struct mlx5e_flow_steering *fs;
5027 struct mlx5e_priv *peer_priv;
5028 struct mlx5e_tc_table *tc;
5029 struct mlx5e_priv *priv;
5030
5031 if (ndev->netdev_ops != &mlx5e_netdev_ops ||
5032 event != NETDEV_UNREGISTER ||
5033 ndev->reg_state == NETREG_REGISTERED)
5034 return NOTIFY_DONE;
5035
5036 tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
5037 fs = container_of(tc, struct mlx5e_flow_steering, tc);
5038 priv = container_of(fs, struct mlx5e_priv, fs);
5039 peer_priv = netdev_priv(ndev);
5040 if (priv == peer_priv ||
5041 !(priv->netdev->features & NETIF_F_HW_TC))
5042 return NOTIFY_DONE;
5043
5044 mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
5045
5046 return NOTIFY_DONE;
5047}
5048
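/* Size the NIC-mode TC flow table: each of the MLX5E_TC_TABLE_NUM_GROUPS
 * groups is capped by the number of flow counters (and by
 * MLX5E_TC_TABLE_MAX_GROUP_SIZE), and the whole table is capped by the
 * device's log_max_ft_size.
 */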
5049static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
5050{
5051 int tc_grp_size, tc_tbl_size;
5052 u32 max_flow_counter;
5053
5054 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
5055 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
5056
5057 tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
5058
5059 tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
5060 BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
5061
5062 return tc_tbl_size;
5063}
5064
5065int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
5066{
5067 struct mlx5e_tc_table *tc = &priv->fs.tc;
5068 struct mlx5_core_dev *dev = priv->mdev;
5069 struct mapping_ctx *chains_mapping;
5070 struct mlx5_chains_attr attr = {};
5071 u64 mapping_id;
5072 int err;
5073
5074 mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
5075 mutex_init(&tc->t_lock);
5076 mutex_init(&tc->hairpin_tbl_lock);
5077 hash_init(tc->hairpin_tbl);
5078
5079 err = rhashtable_init(&tc->ht, &tc_ht_params);
5080 if (err)
5081 return err;
5082
5083 lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
5084
5085 mapping_id = mlx5_query_nic_system_image_guid(dev);
5086
5087 chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
5088 sizeof(struct mlx5_mapped_obj),
5089 MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
5090
5091 if (IS_ERR(chains_mapping)) {
5092 err = PTR_ERR(chains_mapping);
5093 goto err_mapping;
5094 }
5095 tc->mapping = chains_mapping;
5096
5097 if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
5098 attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
5099 MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
5100 attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
5101 attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
5102 attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
5103 attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
5104 attr.mapping = chains_mapping;
5105
5106 tc->chains = mlx5_chains_create(dev, &attr);
5107 if (IS_ERR(tc->chains)) {
5108 err = PTR_ERR(tc->chains);
5109 goto err_chains;
5110 }
5111
5112 tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
5113 tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
5114 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
5115
5116 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
5117 err = register_netdevice_notifier_dev_net(priv->netdev,
5118 &tc->netdevice_nb,
5119 &tc->netdevice_nn);
5120 if (err) {
5121 tc->netdevice_nb.notifier_call = NULL;
5122 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
5123 goto err_reg;
5124 }
5125
5126 return 0;
5127
5128err_reg:
5129 mlx5_tc_ct_clean(tc->ct);
5130 mlx5e_tc_post_act_destroy(tc->post_act);
5131 mlx5_chains_destroy(tc->chains);
5132err_chains:
5133 mapping_destroy(chains_mapping);
5134err_mapping:
5135 rhashtable_destroy(&tc->ht);
5136 return err;
5137}
5138
5139static void _mlx5e_tc_del_flow(void *ptr, void *arg)
5140{
5141 struct mlx5e_tc_flow *flow = ptr;
5142 struct mlx5e_priv *priv = flow->priv;
5143
5144 mlx5e_tc_del_flow(priv, flow);
5145 kfree(flow);
5146}
5147
5148void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
5149{
5150 struct mlx5e_tc_table *tc = &priv->fs.tc;
5151
5152 if (tc->netdevice_nb.notifier_call)
5153 unregister_netdevice_notifier_dev_net(priv->netdev,
5154 &tc->netdevice_nb,
5155 &tc->netdevice_nn);
5156
5157 mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
5158 mutex_destroy(&tc->hairpin_tbl_lock);
5159
5160 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
5161
5162 if (!IS_ERR_OR_NULL(tc->t)) {
5163 mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
5164 tc->t = NULL;
5165 }
5166 mutex_destroy(&tc->t_lock);
5167
5168 mlx5_tc_ct_clean(tc->ct);
5169 mlx5e_tc_post_act_destroy(tc->post_act);
5170 mapping_destroy(tc->mapping);
5171 mlx5_chains_destroy(tc->chains);
5172}
5173
5174int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
5175{
5176 const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
5177 struct mlx5_rep_uplink_priv *uplink_priv;
5178 struct mlx5e_rep_priv *rpriv;
5179 struct mapping_ctx *mapping;
5180 struct mlx5_eswitch *esw;
5181 struct mlx5e_priv *priv;
5182 u64 mapping_id;
5183 int err = 0;
5184
5185 uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
5186 rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
5187 priv = netdev_priv(rpriv->netdev);
5188 esw = priv->mdev->priv.eswitch;
5189
5190 uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
5191 MLX5_FLOW_NAMESPACE_FDB);
5192 uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
5193 esw_chains(esw),
5194 &esw->offloads.mod_hdr,
5195 MLX5_FLOW_NAMESPACE_FDB,
5196 uplink_priv->post_act);
5197
5198 uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));
5199
5200#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
5201 uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
5202#endif
5203
5204 mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
5205
5206 mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
5207 sizeof(struct tunnel_match_key),
5208 TUNNEL_INFO_BITS_MASK, true);
5209
5210 if (IS_ERR(mapping)) {
5211 err = PTR_ERR(mapping);
5212 goto err_tun_mapping;
5213 }
5214 uplink_priv->tunnel_mapping = mapping;
5215
5216 /* The top enc_opts mapping id is reserved for the stack devices' slow path table mark, hence the ENC_OPTS_BITS_MASK - 1 limit */
5217 mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
5218 sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
5219 if (IS_ERR(mapping)) {
5220 err = PTR_ERR(mapping);
5221 goto err_enc_opts_mapping;
5222 }
5223 uplink_priv->tunnel_enc_opts_mapping = mapping;
5224
5225 err = rhashtable_init(tc_ht, &tc_ht_params);
5226 if (err)
5227 goto err_ht_init;
5228
5229 lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
5230
5231 uplink_priv->encap = mlx5e_tc_tun_init(priv);
5232 if (IS_ERR(uplink_priv->encap)) {
5233 err = PTR_ERR(uplink_priv->encap);
5234 goto err_register_fib_notifier;
5235 }
5236
5237 return 0;
5238
5239err_register_fib_notifier:
5240 rhashtable_destroy(tc_ht);
5241err_ht_init:
5242 mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
5243err_enc_opts_mapping:
5244 mapping_destroy(uplink_priv->tunnel_mapping);
5245err_tun_mapping:
5246#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
5247 mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
5248#endif
5249 mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
5250 mlx5_tc_ct_clean(uplink_priv->ct_priv);
5251 netdev_warn(priv->netdev,
5252 "Failed to initialize tc (eswitch), err: %d", err);
5253 mlx5e_tc_post_act_destroy(uplink_priv->post_act);
5254 return err;
5255}
5256
5257void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
5258{
5259 struct mlx5_rep_uplink_priv *uplink_priv;
5260
5261 uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
5262
5263 rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
5264 mlx5e_tc_tun_cleanup(uplink_priv->encap);
5265
5266 mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
5267 mapping_destroy(uplink_priv->tunnel_mapping);
5268
5269#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
5270 mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
5271#endif
5272 mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
5273 mlx5_tc_ct_clean(uplink_priv->ct_priv);
5274 mlx5e_tc_post_act_destroy(uplink_priv->post_act);
5275}
5276
5277int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
5278{
5279 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
5280
5281 return atomic_read(&tc_ht->nelems);
5282}
5283
5284void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
5285{
5286 struct mlx5e_tc_flow *flow, *tmp;
5287
5288 list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
5289 __mlx5e_tc_del_fdb_peer_flow(flow);
5290}
5291
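/* Work item that retries offloading "unready" flows, i.e. flows that could
 * not be fully offloaded earlier (e.g. the FDB add failed with -ENETUNREACH
 * under multipath); flows that are successfully re-added are removed from
 * the unready list.
 */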
5292void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
5293{
5294 struct mlx5_rep_uplink_priv *rpriv =
5295 container_of(work, struct mlx5_rep_uplink_priv,
5296 reoffload_flows_work);
5297 struct mlx5e_tc_flow *flow, *tmp;
5298
5299 mutex_lock(&rpriv->unready_flows_lock);
5300 list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
5301 if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
5302 unready_flow_del(flow);
5303 }
5304 mutex_unlock(&rpriv->unready_flows_lock);
5305}
5306
5307static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
5308 struct flow_cls_offload *cls_flower,
5309 unsigned long flags)
5310{
5311 switch (cls_flower->command) {
5312 case FLOW_CLS_REPLACE:
5313 return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
5314 flags);
5315 case FLOW_CLS_DESTROY:
5316 return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
5317 flags);
5318 case FLOW_CLS_STATS:
5319 return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
5320 flags);
5321 default:
5322 return -EOPNOTSUPP;
5323 }
5324}
5325
5326int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5327 void *cb_priv)
5328{
5329 unsigned long flags = MLX5_TC_FLAG(INGRESS);
5330 struct mlx5e_priv *priv = cb_priv;
5331
5332 if (!priv->netdev || !netif_device_present(priv->netdev))
5333 return -EOPNOTSUPP;
5334
5335 if (mlx5e_is_uplink_rep(priv))
5336 flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
5337 else
5338 flags |= MLX5_TC_FLAG(NIC_OFFLOAD);
5339
5340 switch (type) {
5341 case TC_SETUP_CLSFLOWER:
5342 return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
5343 default:
5344 return -EOPNOTSUPP;
5345 }
5346}
5347
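/* On a NIC-mode table miss, recover the tc chain and CT zone that were
 * stashed in the CQE flow-table metadata (REG_B): map the chain tag back to
 * a chain id, store it in the tc skb extension and let CT restore the
 * conntrack state for the zone. Without CONFIG_NET_TC_SKB_EXT this simply
 * returns true.
 */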
5348bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
5349 struct sk_buff *skb)
5350{
5351#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
5352 u32 chain = 0, chain_tag, reg_b, zone_restore_id;
5353 struct mlx5e_priv *priv = netdev_priv(skb->dev);
5354 struct mlx5e_tc_table *tc = &priv->fs.tc;
5355 struct mlx5_mapped_obj mapped_obj;
5356 struct tc_skb_ext *tc_skb_ext;
5357 int err;
5358
5359 reg_b = be32_to_cpu(cqe->ft_metadata);
5360
5361 chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
5362
5363 err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
5364 if (err) {
5365 netdev_dbg(priv->netdev,
5366 "Couldn't find chain for chain tag: %d, err: %d\n",
5367 chain_tag, err);
5368 return false;
5369 }
5370
5371 if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
5372 chain = mapped_obj.chain;
5373 tc_skb_ext = tc_skb_ext_alloc(skb);
5374 if (WARN_ON(!tc_skb_ext))
5375 return false;
5376
5377 tc_skb_ext->chain = chain;
5378
5379 zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
5380 ESW_ZONE_ID_MASK;
5381
5382 if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
5383 zone_restore_id))
5384 return false;
5385 } else {
5386 netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
5387 return false;
5388 }
5389#endif
5390
5391 return true;
5392}
5393