/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_mpls.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "esw/chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	struct mlx5_modify_hdr *modify_hdr;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table *hairpin_ft;
	struct mlx5_fc *counter;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT		= MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT		= MLX5E_TC_FLOW_BASE + 7,
	MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
};

#define MLX5E_TC_MAX_SPLITS 1

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};

struct mlx5e_tc_flow {
	struct rhash_head node;
	struct mlx5e_priv *priv;
	u64 cookie;
	unsigned long flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

	/* flows sharing the same reformat object are kept on this list */
	struct list_head l3_to_l2_reformat;
	struct mlx5e_decap_entry *decap_reformat;

	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer;    /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
	struct net_device *orig_dev; /* netdev adding flow first */
	int tmp_efi_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
	refcount_t refcnt;
	struct rcu_head rcu_head;
	struct completion init_done;
	int tunnel_id; /* the mapped tunnel id of this flow */

	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
	struct ethhdr eth;
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 2,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 1,
		.mlen = 3,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
};

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);

void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
	data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));

	memcpy(fmask, &mask, match_len);
	memcpy(fval, &data, match_len);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}

void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *data,
				u32 *mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(mask, fmask, match_len);
	memcpy(data, fval, match_len);

	*mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
	*data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
}
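
/* Worked sketch of the byte packing done above (illustrative values, not
 * taken from a real rule): TUNNEL_TO_REG has mlen 3, so a 24-bit value is
 * matched. mlx5e_tc_match_to_reg_match(spec, TUNNEL_TO_REG, 0x123456,
 * 0xFFFFFF) converts the data to big endian and shifts it so the
 * significant bytes 0x12 0x34 0x56 sit first in memory, then copies those
 * three bytes into the reg_c_1 match field at soffset; the _get_match()
 * variant applies the inverse shift to recover the original value and mask.
 */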

int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
				    mod_hdr_acts);
	if (err)
		return err;

	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
	mod_hdr_acts->num_actions++;

	return 0;
}
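
/* Usage sketch for the helper above (mirrors how callers later in this
 * file use it): append a set_action_in that writes a chain id into the
 * low 16 bits of reg_c_0,
 *
 *	err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
 *					CHAIN_TO_REG, chain_id);
 *
 * CHAIN_TO_REG has moffset 0 and mlen 2, so the generated action carries
 * offset 0 and length 16; an mlen of 4 is rewritten to 0 because the
 * firmware's 5-bit length field encodes a full 32-bit write as 0.
 */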

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

static void mlx5e_flow_put(struct mlx5e_priv *priv,
			   struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow,		\
						      MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow,		\
						    MLX5E_TC_FLOW_FLAG_##flag)

static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}

static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  get_flow_name_space(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	if (mlx5e_is_eswitch_flow(flow))
		flow->esw_attr->modify_hdr = modify_hdr;
	else
		flow->nic_attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}

static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
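
/* Illustrative note on the XOR-hash branch above (assuming
 * mlx5e_bits_invert() reverses the low ilog2(sz) bits, as its other
 * users in the driver suggest): with sz = 256, i = 1 maps to ix = 0x80,
 * so adjacent RQT slots draw from distant entries of the default
 * indirection table before the final RQ number lookup.
 */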

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kvfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
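
/* Worked example of the key packing above: peer_vhca_id 0x12 and prio 3
 * hash to key 0x120003; since the two fields occupy disjoint bits, the
 * key is unique per (vhca_id, prio) pair.
 */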

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	int err, dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = attr->flow_tag;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;

		if (flow_flag_test(flow, HAIRPIN_RSS)) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_hdr = attr->modify_hdr;
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	mutex_lock(&priv->fs.tc.t_lock);
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		struct mlx5_flow_table_attr ft_attr = {};
		int tc_grp_size, tc_tbl_size, tc_num_grps;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				   MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
		tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;

		ft_attr.prio = MLX5E_TC_PRIO;
		ft_attr.max_fte = tc_tbl_size;
		ft_attr.level = MLX5E_TC_FT_LEVEL;
		ft_attr.autogroup.max_num_groups = tc_num_grps;
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    &ft_attr);
		if (IS_ERR(priv->fs.tc.t)) {
			mutex_unlock(&priv->fs.tc.t_lock);
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			return PTR_ERR(priv->fs.tc.t);
		}
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);
	mutex_unlock(&priv->fs.tc.t_lock);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);
static int mlx5e_attach_decap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack);
static void mlx5e_detach_decap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
					       mod_hdr_acts);
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}

static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT)) {
		mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
		return;
	}

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5_esw_flow_attr slow_attr;
	struct mlx5_flow_handle *rule;

	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr.split_count = 0;
	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr slow_attr;

	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr.split_count = 0;
	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
	flow_flag_clear(flow, SLOW);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

	if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_esw_chains_get_chain_range(esw);
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		return -EOPNOTSUPP;
	}

	max_prio = mlx5_esw_chains_get_prio_range(esw);
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		return -EOPNOTSUPP;
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			return err;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			return err;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->dests[out_index].rep = rpriv->rep;
		attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(attr->counter_dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid)
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0]))
		return PTR_ERR(flow->rule[0]);
	else
		flow_flag_set(flow, OFFLOADED);

	return 0;
}

static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	int out_index;

	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	kvfree(attr->parse_attr);

	mlx5_tc_ct_match_del(priv, &flow->esw_attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->counter_dev, attr->counter);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);
}

void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
						     e->reformat_type,
						     e->encap_size, e->encap_header,
						     MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(e->pkt_reformat)) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
			       PTR_ERR(e->pkt_reformat));
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, flow_list, tmp_list) {
		bool all_flow_encaps_valid = true;
		int i;

		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		esw_attr = flow->esw_attr;
		spec = &esw_attr->parse_attr->spec;

		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			continue;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow);
		flow->rule[0] = rule;
		/* was unset when slow path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(flow, flow_list, tmp_list) {
		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
		/* mark the flow's encap dest as non-valid */
		flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->rule[0] = rule;
		/* was unset when fast path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}

static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow))
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}

/* Takes reference to all flows attached to encap and adds the flows to
 * flow_list using 'tmp_list' list_head in mlx5e_tc_flow struct
 */
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
{
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(efi, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		if (IS_ERR(mlx5e_flow_get(flow)))
			continue;
		wait_for_completion(&flow->init_done);

		flow->tmp_efi_index = efi->index;
		list_add(&flow->tmp_list, flow_list);
	}
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}

static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
			   struct mlx5e_encap_entry *e)
{
	struct mlx5e_encap_entry *next = NULL;

retry:
	rcu_read_lock();

	/* find encap with non-zero reference counter value */
	for (next = e ?
		     list_next_or_null_rcu(&nhe->encap_list,
					   &e->encap_list,
					   struct mlx5e_encap_entry,
					   encap_list) :
		     list_first_or_null_rcu(&nhe->encap_list,
					    struct mlx5e_encap_entry,
					    encap_list);
	     next;
	     next = list_next_or_null_rcu(&nhe->encap_list,
					  &next->encap_list,
					  struct mlx5e_encap_entry,
					  encap_list))
		if (mlx5e_encap_take(next))
			break;

	rcu_read_unlock();

	/* release starting encap */
	if (e)
		mlx5e_encap_put(netdev_priv(e->out_dev), e);
	if (!next)
		return next;

	/* wait for encap to be fully initialized */
	wait_for_completion(&next->res_ready);
	/* continue searching if encap entry is not in valid state after completion */
	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
		e = next;
		goto retry;
	}

	return next;
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	struct mlx5e_encap_entry *e = NULL;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;
	u64 lastuse;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	/* mlx5e_get_next_valid_encap() releases previous encap before returning
	 * the next one.
	 */
	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
		struct encap_flow_item *efi, *tmp;
		struct mlx5_eswitch *esw;
		LIST_HEAD(flow_list);

		esw = priv->mdev->priv.eswitch;
		mutex_lock(&esw->offloads.encap_tbl_lock);
		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (IS_ERR(mlx5e_flow_get(flow)))
				continue;
			list_add(&flow->tmp_list, &flow_list);

			if (mlx5e_is_offloaded_flow(flow)) {
				counter = mlx5e_tc_get_counter(flow);
				lastuse = mlx5_fc_query_lastuse(counter);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		mutex_unlock(&esw->offloads.encap_tbl_lock);

		mlx5e_put_encap_flow_list(priv, &flow_list);
		if (neigh_used) {
			/* release current encap before breaking the loop */
			mlx5e_encap_put(priv, e);
			break;
		}
	}

	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	WARN_ON(!list_empty(&e->flows));

	if (e->compl_result > 0) {
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
	}

	kfree(e->tun_info);
	kfree(e->encap_header);
	kfree_rcu(e, rcu);
}

static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
				struct mlx5e_decap_entry *d)
{
	WARN_ON(!list_empty(&d->flows));

	if (!d->compl_result)
		mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);

	kfree_rcu(d, rcu);
}

void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
		return;
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}

static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
		return;
	hash_del_rcu(&d->hlist);
	mutex_unlock(&esw->offloads.decap_tbl_lock);

	mlx5e_decap_dealloc(priv, d);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index)
{
	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* flow wasn't fully initialized */
	if (!e)
		return;

	mutex_lock(&esw->offloads.encap_tbl_lock);
	list_del(&flow->encaps[out_index].list);
	flow->encaps[out_index].e = NULL;
	if (!refcount_dec_and_test(&e->refcnt)) {
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		return;
	}
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}

static void mlx5e_detach_decap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_decap_entry *d = flow->decap_reformat;

	if (!d)
		return;

	mutex_lock(&esw->offloads.decap_tbl_lock);
	list_del(&flow->l3_to_l2_reformat);
	flow->decap_reformat = NULL;

	if (!refcount_dec_and_test(&d->refcnt)) {
		mutex_unlock(&esw->offloads.decap_tbl_lock);
		return;
	}
	hash_del_rcu(&d->hlist);
	mutex_unlock(&esw->offloads.decap_tbl_lock);

	mlx5e_decap_dealloc(priv, d);
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}

static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		default:
			continue;
		}
	}

	return false;
}

static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG(extack,
					       "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
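
/* Behavior sketch for the check above (illustrative masks): an all-zero
 * geneve option mask passes as don't care, a mask with class 0xffff and
 * type 0xff passes as a full match, and a partial mask such as class
 * 0xff00 takes the -EOPNOTSUPP path, matching the extack message.
 */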

#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})

static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
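
/* Encoding sketch for the tunnel id composed above (ENC_OPTS_BITS comes
 * from en_tc.h; assuming a width of 12 bits purely for illustration):
 * tunnel mapping id 3 and options mapping id 2 combine into value
 * 0x3002, and mlx5e_put_flow_tunnel_id() below recovers both ids with
 * the matching shift and mask.
 */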

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}

u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}

void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}
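
/* Example of the fallback above (illustrative key/mask): n_proto
 * 0x0800/0xFFFF with ip_version support matches ip_version == 4 instead
 * of ethertype == ETH_P_IP; without the capability, ethertype 0x0800 is
 * matched directly.
 */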

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow))
		return -EOPNOTSUPP;

	needs_mapping = !!flow->esw_attr->chain;
	sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG(extack,
			       "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->esw_attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With mpls over udp we decapsulate using packet reformat
		 * object
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}

static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

static void *get_match_headers_value(u32 flags,
				     struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

static void *get_match_headers_criteria(u32 flags,
					struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}
2031
2032static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
2033 struct flow_cls_offload *f)
2034{
2035 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2036 struct netlink_ext_ack *extack = f->common.extack;
2037 struct net_device *ingress_dev;
2038 struct flow_match_meta match;
2039
2040 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
2041 return 0;
2042
2043 flow_rule_match_meta(rule, &match);
2044 if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
2045 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
2046 return -EOPNOTSUPP;
2047 }
2048
2049 ingress_dev = __dev_get_by_index(dev_net(filter_dev),
2050 match.key->ingress_ifindex);
2051 if (!ingress_dev) {
2052 NL_SET_ERR_MSG_MOD(extack,
2053 "Can't find the ingress port to match on");
2054 return -ENOENT;
2055 }
2056
2057 if (ingress_dev != filter_dev) {
2058 NL_SET_ERR_MSG_MOD(extack,
2059 "Can't match on the ingress filter port");
2060 return -EOPNOTSUPP;
2061 }
2062
2063 return 0;
2064}
2065
2066static bool skip_key_basic(struct net_device *filter_dev,
2067 struct flow_cls_offload *f)
2068{
2069
2070
2071
2072
2073
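 /* When doing mpls over udp decap, the user needs to provide
 * MPLS_UC as the protocol in order to be able to match on mpls
 * label fields. However, the actual ethertype is IP, so we want to
 * avoid matching on it, otherwise the match would fail.
 */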
2074 if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
2075 return true;
2076
2077 return false;
2078}
2079
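/* Translate a flower classifier match into the device's fte_match_param
 * layout. Illustrative example only (interface names are hypothetical)
 * of a filter whose match keys are parsed here:
 *   tc filter add dev enp8s0f0_0 ingress protocol ip flower skip_sw \
 *     dst_mac e4:11:22:33:44:50 ip_proto tcp dst_port 80 action drop
 */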
2080static int __parse_cls_flower(struct mlx5e_priv *priv,
2081 struct mlx5e_tc_flow *flow,
2082 struct mlx5_flow_spec *spec,
2083 struct flow_cls_offload *f,
2084 struct net_device *filter_dev,
2085 u8 *inner_match_level, u8 *outer_match_level)
2086{
2087 struct netlink_ext_ack *extack = f->common.extack;
2088 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2089 outer_headers);
2090 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2091 outer_headers);
2092 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2093 misc_parameters);
2094 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2095 misc_parameters);
2096 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2097 struct flow_dissector *dissector = rule->match.dissector;
2098 u16 addr_type = 0;
2099 u8 ip_proto = 0;
2100 u8 *match_level;
2101 int err;
2102
2103 match_level = outer_match_level;
2104
2105 if (dissector->used_keys &
2106 ~(BIT(FLOW_DISSECTOR_KEY_META) |
2107 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2108 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2109 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2110 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2111 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2112 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2113 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2114 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2115 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2116 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2117 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2118 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2119 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2120 BIT(FLOW_DISSECTOR_KEY_TCP) |
2121 BIT(FLOW_DISSECTOR_KEY_IP) |
2122 BIT(FLOW_DISSECTOR_KEY_CT) |
2123 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2124 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2125 BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2126 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2127 netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
2128 dissector->used_keys);
2129 return -EOPNOTSUPP;
2130 }
2131
2132 if (mlx5e_get_tc_tun(filter_dev)) {
2133 bool match_inner = false;
2134
2135 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2136 outer_match_level, &match_inner);
2137 if (err)
2138 return err;
2139
2140 if (match_inner) {
2141
2142
2143
2144
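 /* header pointers should point to the inner headers
 * if the packet was decapsulated already.
 * outer headers are set by parse_tunnel_attr.
 */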
2145 match_level = inner_match_level;
2146 headers_c = get_match_inner_headers_criteria(spec);
2147 headers_v = get_match_inner_headers_value(spec);
2148 }
2149 }
2150
2151 err = mlx5e_flower_parse_meta(filter_dev, f);
2152 if (err)
2153 return err;
2154
2155 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2156 !skip_key_basic(filter_dev, f)) {
2157 struct flow_match_basic match;
2158
2159 flow_rule_match_basic(rule, &match);
2160 mlx5e_tc_set_ethertype(priv->mdev, &match,
2161 match_level == outer_match_level,
2162 headers_c, headers_v);
2163
2164 if (match.mask->n_proto)
2165 *match_level = MLX5_MATCH_L2;
2166 }
2167 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2168 is_vlan_dev(filter_dev)) {
2169 struct flow_dissector_key_vlan filter_dev_mask;
2170 struct flow_dissector_key_vlan filter_dev_key;
2171 struct flow_match_vlan match;
2172
2173 if (is_vlan_dev(filter_dev)) {
2174 match.key = &filter_dev_key;
2175 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2176 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2177 match.key->vlan_priority = 0;
2178 match.mask = &filter_dev_mask;
2179 memset(match.mask, 0xff, sizeof(*match.mask));
2180 match.mask->vlan_priority = 0;
2181 } else {
2182 flow_rule_match_vlan(rule, &match);
2183 }
2184 if (match.mask->vlan_id ||
2185 match.mask->vlan_priority ||
2186 match.mask->vlan_tpid) {
2187 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2188 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2189 svlan_tag, 1);
2190 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2191 svlan_tag, 1);
2192 } else {
2193 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2194 cvlan_tag, 1);
2195 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2196 cvlan_tag, 1);
2197 }
2198
2199 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2200 match.mask->vlan_id);
2201 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2202 match.key->vlan_id);
2203
2204 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2205 match.mask->vlan_priority);
2206 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2207 match.key->vlan_priority);
2208
2209 *match_level = MLX5_MATCH_L2;
2210 }
2211 } else if (*match_level != MLX5_MATCH_NONE) {
2212
2213
2214
2215
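 /* cvlan_tag enabled in match criteria and
 * disabled in match value means both S & C tags
 * don't exist (untagged of both)
 */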
2216 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2217 *match_level = MLX5_MATCH_L2;
2218 }
2219
2220 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2221 struct flow_match_vlan match;
2222
2223 flow_rule_match_cvlan(rule, &match);
2224 if (match.mask->vlan_id ||
2225 match.mask->vlan_priority ||
2226 match.mask->vlan_tpid) {
2227 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2228 MLX5_SET(fte_match_set_misc, misc_c,
2229 outer_second_svlan_tag, 1);
2230 MLX5_SET(fte_match_set_misc, misc_v,
2231 outer_second_svlan_tag, 1);
2232 } else {
2233 MLX5_SET(fte_match_set_misc, misc_c,
2234 outer_second_cvlan_tag, 1);
2235 MLX5_SET(fte_match_set_misc, misc_v,
2236 outer_second_cvlan_tag, 1);
2237 }
2238
2239 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2240 match.mask->vlan_id);
2241 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2242 match.key->vlan_id);
2243 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2244 match.mask->vlan_priority);
2245 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2246 match.key->vlan_priority);
2247
2248 *match_level = MLX5_MATCH_L2;
2249 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2250 }
2251 }
2252
2253 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2254 struct flow_match_eth_addrs match;
2255
2256 flow_rule_match_eth_addrs(rule, &match);
2257 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2258 dmac_47_16),
2259 match.mask->dst);
2260 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2261 dmac_47_16),
2262 match.key->dst);
2263
2264 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2265 smac_47_16),
2266 match.mask->src);
2267 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2268 smac_47_16),
2269 match.key->src);
2270
2271 if (!is_zero_ether_addr(match.mask->src) ||
2272 !is_zero_ether_addr(match.mask->dst))
2273 *match_level = MLX5_MATCH_L2;
2274 }
2275
2276 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2277 struct flow_match_control match;
2278
2279 flow_rule_match_control(rule, &match);
2280 addr_type = match.key->addr_type;
2281
2282
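 /* the HW doesn't support frag first/later */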
2283 if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
2284 return -EOPNOTSUPP;
2285
2286 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2287 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2288 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2289 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2290
2291
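 /* the HW doesn't need L3 inline to match on frag=no */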
2292 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2293 *match_level = MLX5_MATCH_L2;
2294
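 /* ***  L2 attributes parsing up to here *** */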
2295 else
2296 *match_level = MLX5_MATCH_L3;
2297 }
2298 }
2299
2300 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2301 struct flow_match_basic match;
2302
2303 flow_rule_match_basic(rule, &match);
2304 ip_proto = match.key->ip_proto;
2305
2306 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2307 match.mask->ip_proto);
2308 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2309 match.key->ip_proto);
2310
2311 if (match.mask->ip_proto)
2312 *match_level = MLX5_MATCH_L3;
2313 }
2314
2315 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2316 struct flow_match_ipv4_addrs match;
2317
2318 flow_rule_match_ipv4_addrs(rule, &match);
2319 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2320 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2321 &match.mask->src, sizeof(match.mask->src));
2322 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2323 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2324 &match.key->src, sizeof(match.key->src));
2325 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2326 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2327 &match.mask->dst, sizeof(match.mask->dst));
2328 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2329 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2330 &match.key->dst, sizeof(match.key->dst));
2331
2332 if (match.mask->src || match.mask->dst)
2333 *match_level = MLX5_MATCH_L3;
2334 }
2335
2336 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2337 struct flow_match_ipv6_addrs match;
2338
2339 flow_rule_match_ipv6_addrs(rule, &match);
2340 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2341 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2342 &match.mask->src, sizeof(match.mask->src));
2343 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2344 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2345 &match.key->src, sizeof(match.key->src));
2346
2347 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2348 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2349 &match.mask->dst, sizeof(match.mask->dst));
2350 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2351 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2352 &match.key->dst, sizeof(match.key->dst));
2353
2354 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2355 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2356 *match_level = MLX5_MATCH_L3;
2357 }
2358
2359 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2360 struct flow_match_ip match;
2361
2362 flow_rule_match_ip(rule, &match);
2363 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2364 match.mask->tos & 0x3);
2365 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2366 match.key->tos & 0x3);
2367
2368 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2369 match.mask->tos >> 2);
2370 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2371 match.key->tos >> 2);
2372
2373 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2374 match.mask->ttl);
2375 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2376 match.key->ttl);
2377
2378 if (match.mask->ttl &&
2379 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2380 ft_field_support.outer_ipv4_ttl)) {
2381 NL_SET_ERR_MSG_MOD(extack,
2382 "Matching on TTL is not supported");
2383 return -EOPNOTSUPP;
2384 }
2385
2386 if (match.mask->tos || match.mask->ttl)
2387 *match_level = MLX5_MATCH_L3;
2388 }
2389
2390
2391
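 /* ***  L3 attributes parsing up to here *** */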
2392 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2393 struct flow_match_ports match;
2394
2395 flow_rule_match_ports(rule, &match);
2396 switch (ip_proto) {
2397 case IPPROTO_TCP:
2398 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2399 tcp_sport, ntohs(match.mask->src));
2400 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2401 tcp_sport, ntohs(match.key->src));
2402
2403 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2404 tcp_dport, ntohs(match.mask->dst));
2405 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2406 tcp_dport, ntohs(match.key->dst));
2407 break;
2408
2409 case IPPROTO_UDP:
2410 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2411 udp_sport, ntohs(match.mask->src));
2412 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2413 udp_sport, ntohs(match.key->src));
2414
2415 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2416 udp_dport, ntohs(match.mask->dst));
2417 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2418 udp_dport, ntohs(match.key->dst));
2419 break;
2420 default:
2421 NL_SET_ERR_MSG_MOD(extack,
2422 "Only UDP and TCP transports are supported for L4 matching");
2423 netdev_err(priv->netdev,
2424 "Only UDP and TCP transport are supported\n");
2425 return -EINVAL;
2426 }
2427
2428 if (match.mask->src || match.mask->dst)
2429 *match_level = MLX5_MATCH_L4;
2430 }
2431
2432 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2433 struct flow_match_tcp match;
2434
2435 flow_rule_match_tcp(rule, &match);
2436 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2437 ntohs(match.mask->flags));
2438 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2439 ntohs(match.key->flags));
2440
2441 if (match.mask->flags)
2442 *match_level = MLX5_MATCH_L4;
2443 }
2444
2445 return 0;
2446}
2447
2448static int parse_cls_flower(struct mlx5e_priv *priv,
2449 struct mlx5e_tc_flow *flow,
2450 struct mlx5_flow_spec *spec,
2451 struct flow_cls_offload *f,
2452 struct net_device *filter_dev)
2453{
2454 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2455 struct netlink_ext_ack *extack = f->common.extack;
2456 struct mlx5_core_dev *dev = priv->mdev;
2457 struct mlx5_eswitch *esw = dev->priv.eswitch;
2458 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2459 struct mlx5_eswitch_rep *rep;
2460 bool is_eswitch_flow;
2461 int err;
2462
2463 inner_match_level = MLX5_MATCH_NONE;
2464 outer_match_level = MLX5_MATCH_NONE;
2465
2466 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2467 &inner_match_level, &outer_match_level);
2468 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2469 outer_match_level : inner_match_level;
2470
2471 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2472 if (!err && is_eswitch_flow) {
2473 rep = rpriv->rep;
2474 if (rep->vport != MLX5_VPORT_UPLINK &&
2475 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2476 esw->offloads.inline_mode < non_tunnel_match_level)) {
2477 NL_SET_ERR_MSG_MOD(extack,
2478 "Flow is not offloaded due to min inline setting");
2479 netdev_warn(priv->netdev,
2480 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2481 non_tunnel_match_level, esw->offloads.inline_mode);
2482 return -EOPNOTSUPP;
2483 }
2484 }
2485
2486 if (is_eswitch_flow) {
2487 flow->esw_attr->inner_match_level = inner_match_level;
2488 flow->esw_attr->outer_match_level = outer_match_level;
2489 } else {
2490 flow->nic_attr->match_level = non_tunnel_match_level;
2491 }
2492
2493 return err;
2494}
2495
2496struct pedit_headers {
2497 struct ethhdr eth;
2498 struct vlan_hdr vlan;
2499 struct iphdr ip4;
2500 struct ipv6hdr ip6;
2501 struct tcphdr tcp;
2502 struct udphdr udp;
2503};
2504
2505struct pedit_headers_action {
2506 struct pedit_headers vals;
2507 struct pedit_headers masks;
2508 u32 pedits;
2509};
2510
2511static int pedit_header_offsets[] = {
2512 [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
2513 [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
2514 [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
2515 [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
2516 [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2517};
2518
2519#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2520
2521static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2522 struct pedit_headers_action *hdrs)
2523{
2524 u32 *curr_pmask, *curr_pval;
2525
2526 curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2527 curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2528
2529 if (*curr_pmask & mask)
2530 goto out_err;
2531
2532 *curr_pmask |= mask;
2533 *curr_pval |= (val & mask);
2534
2535 return 0;
2536
2537out_err:
2538 return -EOPNOTSUPP;
2539}
2540
2541struct mlx5_fields {
2542 u8 field;
2543 u8 field_bsize;
2544 u32 field_mask;
2545 u32 offset;
2546 u32 match_offset;
2547};
2548
2549#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2550 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2551 offsetof(struct pedit_headers, field) + (off), \
2552 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
2553
2554
2555
2556
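/* masked values are the same and there are no rewrites that do not have a
 * match.
 */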
2557#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2558 type matchmaskx = *(type *)(matchmaskp); \
2559 type matchvalx = *(type *)(matchvalp); \
2560 type maskx = *(type *)(maskp); \
2561 type valx = *(type *)(valp); \
2562 \
2563 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2564 matchmaskx)); \
2565})
2566
2567static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2568 void *matchmaskp, u8 bsize)
2569{
2570 bool same = false;
2571
2572 switch (bsize) {
2573 case 8:
2574 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2575 break;
2576 case 16:
2577 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2578 break;
2579 case 32:
2580 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
2581 break;
2582 }
2583
2584 return same;
2585}
2586
2587static struct mlx5_fields fields[] = {
2588 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2589 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2590 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2591 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2592 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2593 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2594
2595 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2596 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2597 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2598 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2599
2600 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2601 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2602 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2603 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2604 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2605 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2606 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2607 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2608 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2609 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2610 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2611 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2612 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2613 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2614 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2615 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2616 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2617
2618 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2619 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
2620
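 /* tcp flags are 8 bits long, at byte offset 13 of the tcp header (ack_seq + 5) */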
2621 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
2622
2623 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2624 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
2625};
2626
2627static unsigned long mask_to_le(unsigned long mask, int size)
2628{
2629 __be32 mask_be32;
2630 __be16 mask_be16;
2631
2632 if (size == 32) {
2633 mask_be32 = (__force __be32)(mask);
2634 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2635 } else if (size == 16) {
2636 mask_be32 = (__force __be32)(mask);
2637 mask_be16 = *(__be16 *)&mask_be32;
2638 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2639 }
2640
2641 return mask;
2642}

2643static int offload_pedit_fields(struct mlx5e_priv *priv,
2644 int namespace,
2645 struct pedit_headers_action *hdrs,
2646 struct mlx5e_tc_flow_parse_attr *parse_attr,
2647 u32 *action_flags,
2648 struct netlink_ext_ack *extack)
2649{
2650 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2651 int i, action_size, first, last, next_z;
2652 void *headers_c, *headers_v, *action, *vals_p;
2653 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2654 struct mlx5e_tc_mod_hdr_acts *mod_acts;
2655 struct mlx5_fields *f;
2656 unsigned long mask, field_mask;
2657 int err;
2658 u8 cmd;
2659
2660 mod_acts = &parse_attr->mod_hdr_acts;
2661 headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
2662 headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2663
2664 set_masks = &hdrs[0].masks;
2665 add_masks = &hdrs[1].masks;
2666 set_vals = &hdrs[0].vals;
2667 add_vals = &hdrs[1].vals;
2668
2669 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2670
2671 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2672 bool skip;
2673
2674 f = &fields[i];
2675
2676 s_mask = 0;
2677 a_mask = 0;
2678
2679 s_masks_p = (void *)set_masks + f->offset;
2680 a_masks_p = (void *)add_masks + f->offset;
2681
2682 s_mask = *s_masks_p & f->field_mask;
2683 a_mask = *a_masks_p & f->field_mask;
2684
2685 if (!s_mask && !a_mask)
2686 continue;
2687
2688 if (s_mask && a_mask) {
2689 NL_SET_ERR_MSG_MOD(extack,
2690 "can't set and add to the same HW field");
2691 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2692 return -EOPNOTSUPP;
2693 }
2694
2695 skip = false;
2696 if (s_mask) {
2697 void *match_mask = headers_c + f->match_offset;
2698 void *match_val = headers_v + f->match_offset;
2699
2700 cmd = MLX5_ACTION_TYPE_SET;
2701 mask = s_mask;
2702 vals_p = (void *)set_vals + f->offset;
2703
2704 if (cmp_val_mask(vals_p, s_masks_p, match_val,
2705 match_mask, f->field_bsize))
2706 skip = true;
2707
2708 *s_masks_p &= ~f->field_mask;
2709 } else {
2710 cmd = MLX5_ACTION_TYPE_ADD;
2711 mask = a_mask;
2712 vals_p = (void *)add_vals + f->offset;
2713
2714 if ((*(u32 *)vals_p & f->field_mask) == 0)
2715 skip = true;
2716
2717 *a_masks_p &= ~f->field_mask;
2718 }
2719 if (skip)
2720 continue;
2721
2722 mask = mask_to_le(mask, f->field_bsize);
2723
2724 first = find_first_bit(&mask, f->field_bsize);
2725 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2726 last = find_last_bit(&mask, f->field_bsize);
2727 if (first < next_z && next_z < last) {
2728 NL_SET_ERR_MSG_MOD(extack,
2729 "rewrite of few sub-fields isn't supported");
2730 printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
2731 mask);
2732 return -EOPNOTSUPP;
2733 }
2734
2735 err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
2736 if (err) {
2737 NL_SET_ERR_MSG_MOD(extack,
2738 "too many pedit actions, can't offload");
2739 mlx5_core_warn(priv->mdev,
2740 "mlx5: parsed %d pedit actions, can't do more\n",
2741 mod_acts->num_actions);
2742 return err;
2743 }
2744
2745 action = mod_acts->actions +
2746 (mod_acts->num_actions * action_size);
2747 MLX5_SET(set_action_in, action, action_type, cmd);
2748 MLX5_SET(set_action_in, action, field, f->field);
2749
2750 if (cmd == MLX5_ACTION_TYPE_SET) {
2751 int start;
2752
2753 field_mask = mask_to_le(f->field_mask, f->field_bsize);
2754
2755
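 /* if field is bit sized it can start not from first bit */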
2756 start = find_first_bit(&field_mask, f->field_bsize);
2757
2758 MLX5_SET(set_action_in, action, offset, first - start);
2759
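 /* length is the number of bits to be written, zero means length of 32 */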
2760 MLX5_SET(set_action_in, action, length, (last - first + 1));
2761 }
2762
2763 if (f->field_bsize == 32)
2764 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2765 else if (f->field_bsize == 16)
2766 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2767 else if (f->field_bsize == 8)
2768 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2769
2770 ++mod_acts->num_actions;
2771 }
2772
2773 return 0;
2774}
2775
2776static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
2777 int namespace)
2778{
2779 if (namespace == MLX5_FLOW_NAMESPACE_FDB)
2780 return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
2781 else
2782 return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
2783}
2784
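/* Make sure there is room for one more modify header action. The actions
 * buffer grows geometrically (1, 2, 4, ...) and is capped by the
 * max_modify_header_actions capability of the given namespace; once the
 * cap is reached, -ENOSPC is returned.
 */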
2785int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2786 int namespace,
2787 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2788{
2789 int action_size, new_num_actions, max_hw_actions;
2790 size_t new_sz, old_sz;
2791 void *ret;
2792
2793 if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
2794 return 0;
2795
2796 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2797
2798 max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
2799 namespace);
2800 new_num_actions = min(max_hw_actions,
2801 mod_hdr_acts->actions ?
2802 mod_hdr_acts->max_actions * 2 : 1);
2803 if (mod_hdr_acts->max_actions == new_num_actions)
2804 return -ENOSPC;
2805
2806 new_sz = action_size * new_num_actions;
2807 old_sz = mod_hdr_acts->max_actions * action_size;
2808 ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
2809 if (!ret)
2810 return -ENOMEM;
2811
2812 memset(ret + old_sz, 0, new_sz - old_sz);
2813 mod_hdr_acts->actions = ret;
2814 mod_hdr_acts->max_actions = new_num_actions;
2815
2816 return 0;
2817}
2818
2819void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2820{
2821 kfree(mod_hdr_acts->actions);
2822 mod_hdr_acts->actions = NULL;
2823 mod_hdr_acts->num_actions = 0;
2824 mod_hdr_acts->max_actions = 0;
2825}
2826
2827static const struct pedit_headers zero_masks = {};
2828
2829static int
2830parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
2831 const struct flow_action_entry *act, int namespace,
2832 struct mlx5e_tc_flow_parse_attr *parse_attr,
2833 struct pedit_headers_action *hdrs,
2834 struct netlink_ext_ack *extack)
2835{
2836 u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2837 int err = -EOPNOTSUPP;
2838 u32 mask, val, offset;
2839 u8 htype;
2840
2841 htype = act->mangle.htype;
2843
2844 if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2845 NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2846 goto out_err;
2847 }
2848
2849 if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
2850 NL_SET_ERR_MSG_MOD(extack,
2851 "The pedit offload action is not supported");
2852 goto out_err;
2853 }
2854
2855 mask = act->mangle.mask;
2856 val = act->mangle.val;
2857 offset = act->mangle.offset;
2858
2859 err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2860 if (err)
2861 goto out_err;
2862
2863 hdrs[cmd].pedits++;
2864
2865 return 0;
2866out_err:
2867 return err;
2868}
2869
2870static int
2871parse_pedit_to_reformat(struct mlx5e_priv *priv,
2872 const struct flow_action_entry *act,
2873 struct mlx5e_tc_flow_parse_attr *parse_attr,
2874 struct netlink_ext_ack *extack)
2875{
2876 u32 mask, val, offset;
2877 u32 *p;
2878
2879 if (act->id != FLOW_ACTION_MANGLE)
2880 return -EOPNOTSUPP;
2881
2882 if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
2883 NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
2884 return -EOPNOTSUPP;
2885 }
2886
2887 mask = ~act->mangle.mask;
2888 val = act->mangle.val;
2889 offset = act->mangle.offset;
2890 p = (u32 *)&parse_attr->eth;
2891 *(p + (offset >> 2)) |= (val & mask);
2892
2893 return 0;
2894}
2895
2896static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2897 const struct flow_action_entry *act, int namespace,
2898 struct mlx5e_tc_flow_parse_attr *parse_attr,
2899 struct pedit_headers_action *hdrs,
2900 struct mlx5e_tc_flow *flow,
2901 struct netlink_ext_ack *extack)
2902{
2903 if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
2904 return parse_pedit_to_reformat(priv, act, parse_attr, extack);
2905
2906 return parse_pedit_to_modify_hdr(priv, act, namespace,
2907 parse_attr, hdrs, extack);
2908}
2909
2910static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2911 struct mlx5e_tc_flow_parse_attr *parse_attr,
2912 struct pedit_headers_action *hdrs,
2913 u32 *action_flags,
2914 struct netlink_ext_ack *extack)
2915{
2916 struct pedit_headers *cmd_masks;
2917 int err;
2918 u8 cmd;
2919
2920 err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
2921 action_flags, extack);
2922 if (err < 0)
2923 goto out_dealloc_parsed_actions;
2924
2925 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2926 cmd_masks = &hdrs[cmd].masks;
2927 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2928 NL_SET_ERR_MSG_MOD(extack,
2929 "attempt to offload an unsupported field");
2930 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2931 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2932 16, 1, cmd_masks, sizeof(zero_masks), true);
2933 err = -EOPNOTSUPP;
2934 goto out_dealloc_parsed_actions;
2935 }
2936 }
2937
2938 return 0;
2939
2940out_dealloc_parsed_actions:
2941 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
2942 return err;
2943}
2944
2945static bool csum_offload_supported(struct mlx5e_priv *priv,
2946 u32 action,
2947 u32 update_flags,
2948 struct netlink_ext_ack *extack)
2949{
2950 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2951 TCA_CSUM_UPDATE_FLAG_UDP;
2952
2953
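 /* The HW recalcs checksums only if re-writing headers */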
2954 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2955 NL_SET_ERR_MSG_MOD(extack,
2956 "TC csum action is only offloaded with pedit");
2957 netdev_warn(priv->netdev,
2958 "TC csum action is only offloaded with pedit\n");
2959 return false;
2960 }
2961
2962 if (update_flags & ~prot_flags) {
2963 NL_SET_ERR_MSG_MOD(extack,
2964 "can't offload TC csum action for some header/s");
2965 netdev_warn(priv->netdev,
2966 "can't offload TC csum action for some header/s - flags %#x\n",
2967 update_flags);
2968 return false;
2969 }
2970
2971 return true;
2972}
2973
2974struct ip_ttl_word {
2975 __u8 ttl;
2976 __u8 protocol;
2977 __sum16 check;
2978};
2979
2980struct ipv6_hoplimit_word {
2981 __be16 payload_len;
2982 __u8 nexthdr;
2983 __u8 hop_limit;
2984};
2985
2986static int is_action_keys_supported(const struct flow_action_entry *act,
2987 bool ct_flow, bool *modify_ip_header,
2988 bool *modify_tuple,
2989 struct netlink_ext_ack *extack)
2990{
2991 u32 mask, offset;
2992 u8 htype;
2993
2994 htype = act->mangle.htype;
2995 offset = act->mangle.offset;
2996 mask = ~act->mangle.mask;
2997
2998
2999
3000
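 /* For IPv4 & IPv6 headers, check the 4 byte word to determine
 * that the modified fields are NOT ttl & hop_limit only.
 */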
3001 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3002 struct ip_ttl_word *ttl_word =
3003 (struct ip_ttl_word *)&mask;
3004
3005 if (offset != offsetof(struct iphdr, ttl) ||
3006 ttl_word->protocol ||
3007 ttl_word->check) {
3008 *modify_ip_header = true;
3009 }
3010
3011 if (offset >= offsetof(struct iphdr, saddr))
3012 *modify_tuple = true;
3013
3014 if (ct_flow && *modify_tuple) {
3015 NL_SET_ERR_MSG_MOD(extack,
3016 "can't offload re-write of ipv4 address with action ct");
3017 return -EOPNOTSUPP;
3018 }
3019 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3020 struct ipv6_hoplimit_word *hoplimit_word =
3021 (struct ipv6_hoplimit_word *)&mask;
3022
3023 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3024 hoplimit_word->payload_len ||
3025 hoplimit_word->nexthdr) {
3026 *modify_ip_header = true;
3027 }
3028
3029 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3030 *modify_tuple = true;
3031
3032 if (ct_flow && *modify_tuple) {
3033 NL_SET_ERR_MSG_MOD(extack,
3034 "can't offload re-write of ipv6 address with action ct");
3035 return -EOPNOTSUPP;
3036 }
3037 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3038 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3039 *modify_tuple = true;
3040 if (ct_flow) {
3041 NL_SET_ERR_MSG_MOD(extack,
3042 "can't offload re-write of transport header ports with action ct");
3043 return -EOPNOTSUPP;
3044 }
3045 }
3046
3047 return 0;
3048}
3049
3050static bool modify_header_match_supported(struct mlx5e_priv *priv,
3051 struct mlx5_flow_spec *spec,
3052 struct flow_action *flow_action,
3053 u32 actions, bool ct_flow,
3054 bool ct_clear,
3055 struct netlink_ext_ack *extack)
3056{
3057 const struct flow_action_entry *act;
3058 bool modify_ip_header, modify_tuple;
3059 void *headers_c;
3060 void *headers_v;
3061 u16 ethertype;
3062 u8 ip_proto;
3063 int i, err;
3064
3065 headers_c = get_match_headers_criteria(actions, spec);
3066 headers_v = get_match_headers_value(actions, spec);
3067 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3068
3069
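 /* for non-IP we only re-write MACs, so we're okay */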
3070 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3071 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3072 goto out_ok;
3073
3074 modify_ip_header = false;
3075 modify_tuple = false;
3076 flow_action_for_each(i, act, flow_action) {
3077 if (act->id != FLOW_ACTION_MANGLE &&
3078 act->id != FLOW_ACTION_ADD)
3079 continue;
3080
3081 err = is_action_keys_supported(act, ct_flow,
3082 &modify_ip_header,
3083 &modify_tuple, extack);
3084 if (err)
3085 return false;
3086 }
3087
3088
3089
3090
3091
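 /* Add ct_state=-trk match so it will be offloaded for non ct flows
 * (or after clear action), as otherwise, since the tuple is changed,
 * we can't restore ct state
 */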
3092 if (!ct_clear && modify_tuple &&
3093 mlx5_tc_ct_add_no_trk_match(priv, spec)) {
3094 NL_SET_ERR_MSG_MOD(extack,
3095 "can't offload tuple modify header with ct matches");
3096 netdev_info(priv->netdev,
3097 "can't offload tuple modify header with ct matches");
3098 return false;
3099 }
3100
3101 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3102 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3103 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3104 NL_SET_ERR_MSG_MOD(extack,
3105 "can't offload re-write of non TCP/UDP");
3106 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3107 ip_proto);
3108 return false;
3109 }
3110
3111out_ok:
3112 return true;
3113}
3114
3115static bool actions_match_supported(struct mlx5e_priv *priv,
3116 struct flow_action *flow_action,
3117 struct mlx5e_tc_flow_parse_attr *parse_attr,
3118 struct mlx5e_tc_flow *flow,
3119 struct netlink_ext_ack *extack)
3120{
3121 bool ct_flow = false, ct_clear = false;
3122 u32 actions;
3123
3124 if (mlx5e_is_eswitch_flow(flow)) {
3125 actions = flow->esw_attr->action;
3126 ct_clear = flow->esw_attr->ct_attr.ct_action &
3127 TCA_CT_ACT_CLEAR;
3128 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3129 if (flow->esw_attr->split_count && ct_flow) {
3130
3131
3132
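 /* All registers used by ct are cleared when using
 * split rules.
 */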
3133 NL_SET_ERR_MSG_MOD(extack,
3134 "Can't offload mirroring with action ct");
3135 return false;
3136 }
3137 } else {
3138 actions = flow->nic_attr->action;
3139 }
3140
3141 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3142 return modify_header_match_supported(priv, &parse_attr->spec,
3143 flow_action, actions,
3144 ct_flow, ct_clear,
3145 extack);
3146
3147 return true;
3148}
3149
3150static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3151{
3152 return priv->mdev == peer_priv->mdev;
3153}
3154
3155static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3156{
3157 struct mlx5_core_dev *fmdev, *pmdev;
3158 u64 fsystem_guid, psystem_guid;
3159
3160 fmdev = priv->mdev;
3161 pmdev = peer_priv->mdev;
3162
3163 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3164 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3165
3166 return (fsystem_guid == psystem_guid);
3167}
3168
3169static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3170 const struct flow_action_entry *act,
3171 struct mlx5e_tc_flow_parse_attr *parse_attr,
3172 struct pedit_headers_action *hdrs,
3173 u32 *action, struct netlink_ext_ack *extack)
3174{
3175 u16 mask16 = VLAN_VID_MASK;
3176 u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3177 const struct flow_action_entry pedit_act = {
3178 .id = FLOW_ACTION_MANGLE,
3179 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3180 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3181 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3182 .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3183 };
3184 u8 match_prio_mask, match_prio_val;
3185 void *headers_c, *headers_v;
3186 int err;
3187
3188 headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3189 headers_v = get_match_headers_value(*action, &parse_attr->spec);
3190
3191 if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3192 MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3193 NL_SET_ERR_MSG_MOD(extack,
3194 "VLAN rewrite action must have VLAN protocol match");
3195 return -EOPNOTSUPP;
3196 }
3197
3198 match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
3199 match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
3200 if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
3201 NL_SET_ERR_MSG_MOD(extack,
3202 "Changing VLAN prio is not supported");
3203 return -EOPNOTSUPP;
3204 }
3205
3206 err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
3207 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3208
3209 return err;
3210}
3211
3212static int
3213add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
3214 struct mlx5e_tc_flow_parse_attr *parse_attr,
3215 struct pedit_headers_action *hdrs,
3216 u32 *action, struct netlink_ext_ack *extack)
3217{
3218 const struct flow_action_entry prio_tag_act = {
3219 .vlan.vid = 0,
3220 .vlan.prio =
3221 MLX5_GET(fte_match_set_lyr_2_4,
3222 get_match_headers_value(*action,
3223 &parse_attr->spec),
3224 first_prio) &
3225 MLX5_GET(fte_match_set_lyr_2_4,
3226 get_match_headers_criteria(*action,
3227 &parse_attr->spec),
3228 first_prio),
3229 };
3230
3231 return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3232 &prio_tag_act, parse_attr, hdrs, action,
3233 extack);
3234}
3235
3236static int parse_tc_nic_actions(struct mlx5e_priv *priv,
3237 struct flow_action *flow_action,
3238 struct mlx5e_tc_flow_parse_attr *parse_attr,
3239 struct mlx5e_tc_flow *flow,
3240 struct netlink_ext_ack *extack)
3241{
3242 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
3243 struct pedit_headers_action hdrs[2] = {};
3244 const struct flow_action_entry *act;
3245 u32 action = 0;
3246 int err, i;
3247
3248 if (!flow_action_has_entries(flow_action))
3249 return -EINVAL;
3250
3251 if (!flow_action_hw_stats_check(flow_action, extack,
3252 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3253 return -EOPNOTSUPP;
3254
3255 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3256
3257 flow_action_for_each(i, act, flow_action) {
3258 switch (act->id) {
3259 case FLOW_ACTION_ACCEPT:
3260 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3261 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3262 break;
3263 case FLOW_ACTION_DROP:
3264 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3265 if (MLX5_CAP_FLOWTABLE(priv->mdev,
3266 flow_table_properties_nic_receive.flow_counter))
3267 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3268 break;
3269 case FLOW_ACTION_MANGLE:
3270 case FLOW_ACTION_ADD:
3271 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3272 parse_attr, hdrs, NULL, extack);
3273 if (err)
3274 return err;
3275
3276 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
3277 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3278 break;
3279 case FLOW_ACTION_VLAN_MANGLE:
3280 err = add_vlan_rewrite_action(priv,
3281 MLX5_FLOW_NAMESPACE_KERNEL,
3282 act, parse_attr, hdrs,
3283 &action, extack);
3284 if (err)
3285 return err;
3286
3287 break;
3288 case FLOW_ACTION_CSUM:
3289 if (csum_offload_supported(priv, action,
3290 act->csum_flags,
3291 extack))
3292 break;
3293
3294 return -EOPNOTSUPP;
3295 case FLOW_ACTION_REDIRECT: {
3296 struct net_device *peer_dev = act->dev;
3297
3298 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
3299 same_hw_devs(priv, netdev_priv(peer_dev))) {
3300 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3301 flow_flag_set(flow, HAIRPIN);
3302 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3303 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3304 } else {
3305 NL_SET_ERR_MSG_MOD(extack,
3306 "device is not on same HW, can't offload");
3307 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
3308 peer_dev->name);
3309 return -EINVAL;
3310 }
3311 }
3312 break;
3313 case FLOW_ACTION_MARK: {
3314 u32 mark = act->mark;
3315
3316 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3317 NL_SET_ERR_MSG_MOD(extack,
3318 "Bad flow mark - only 16 bit is supported");
3319 return -EINVAL;
3320 }
3321
3322 attr->flow_tag = mark;
3323 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3324 }
3325 break;
3326 default:
3327 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3328 return -EOPNOTSUPP;
3329 }
3330 }
3331
3332 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3333 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3334 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
3335 parse_attr, hdrs, &action, extack);
3336 if (err)
3337 return err;
3338
3339
3340
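 /* In case all pedit actions are skipped, remove the MOD_HDR
 * flag.
 */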
3341 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3342 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3343 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3344 }
3345 }
3346
3347 attr->action = action;
3348 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3349 return -EOPNOTSUPP;
3350
3351 return 0;
3352}
3353
3354struct encap_key {
3355 const struct ip_tunnel_key *ip_tun_key;
3356 struct mlx5e_tc_tunnel *tc_tunnel;
3357};
3358
3359static inline int cmp_encap_info(struct encap_key *a,
3360 struct encap_key *b)
3361{
3362 return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
3363 a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
3364}
3365
3366static inline int cmp_decap_info(struct mlx5e_decap_key *a,
3367 struct mlx5e_decap_key *b)
3368{
3369 return memcmp(&a->key, &b->key, sizeof(b->key));
3370}
3371
3372static inline int hash_encap_info(struct encap_key *key)
3373{
3374 return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
3375 key->tc_tunnel->tunnel_type);
3376}
3377
3378static inline int hash_decap_info(struct mlx5e_decap_key *key)
3379{
3380 return jhash(&key->key, sizeof(key->key), 0);
3381}
3382
3383static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3384 struct net_device *peer_netdev)
3385{
3386 struct mlx5e_priv *peer_priv;
3387
3388 peer_priv = netdev_priv(peer_netdev);
3389
3390 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3391 mlx5e_eswitch_vf_rep(priv->netdev) &&
3392 mlx5e_eswitch_vf_rep(peer_netdev) &&
3393 same_hw_devs(priv, peer_priv));
3394}
3395
3396bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
3397{
3398 return refcount_inc_not_zero(&e->refcnt);
3399}
3400
3401static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
3402{
3403 return refcount_inc_not_zero(&e->refcnt);
3404}
3405
3406static struct mlx5e_encap_entry *
3407mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
3408 uintptr_t hash_key)
3409{
3410 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3411 struct mlx5e_encap_entry *e;
3412 struct encap_key e_key;
3413
3414 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
3415 encap_hlist, hash_key) {
3416 e_key.ip_tun_key = &e->tun_info->key;
3417 e_key.tc_tunnel = e->tunnel;
3418 if (!cmp_encap_info(&e_key, key) &&
3419 mlx5e_encap_take(e))
3420 return e;
3421 }
3422
3423 return NULL;
3424}
3425
3426static struct mlx5e_decap_entry *
3427mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
3428 uintptr_t hash_key)
3429{
3430 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3431 struct mlx5e_decap_key r_key;
3432 struct mlx5e_decap_entry *e;
3433
3434 hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
3435 hlist, hash_key) {
3436 r_key = e->key;
3437 if (!cmp_decap_info(&r_key, key) &&
3438 mlx5e_decap_take(e))
3439 return e;
3440 }
3441 return NULL;
3442}
3443
3444static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
3445{
3446 size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
3447
3448 return kmemdup(tun_info, tun_size, GFP_KERNEL);
3449}
3450
3451static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
3452 struct mlx5e_tc_flow *flow,
3453 int out_index,
3454 struct mlx5e_encap_entry *e,
3455 struct netlink_ext_ack *extack)
3456{
3457 int i;
3458
3459 for (i = 0; i < out_index; i++) {
3460 if (flow->encaps[i].e != e)
3461 continue;
3462 NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
3463 netdev_err(priv->netdev, "can't duplicate encap action\n");
3464 return true;
3465 }
3466
3467 return false;
3468}
3469
3470static int mlx5e_attach_encap(struct mlx5e_priv *priv,
3471 struct mlx5e_tc_flow *flow,
3472 struct net_device *mirred_dev,
3473 int out_index,
3474 struct netlink_ext_ack *extack,
3475 struct net_device **encap_dev,
3476 bool *encap_valid)
3477{
3478 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3479 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3480 struct mlx5e_tc_flow_parse_attr *parse_attr;
3481 const struct ip_tunnel_info *tun_info;
3482 struct encap_key key;
3483 struct mlx5e_encap_entry *e;
3484 unsigned short family;
3485 uintptr_t hash_key;
3486 int err = 0;
3487
3488 parse_attr = attr->parse_attr;
3489 tun_info = parse_attr->tun_info[out_index];
3490 family = ip_tunnel_info_af(tun_info);
3491 key.ip_tun_key = &tun_info->key;
3492 key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
3493 if (!key.tc_tunnel) {
3494 NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
3495 return -EOPNOTSUPP;
3496 }
3497
3498 hash_key = hash_encap_info(&key);
3499
3500 mutex_lock(&esw->offloads.encap_tbl_lock);
3501 e = mlx5e_encap_get(priv, &key, hash_key);
3502
3503
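 /* must verify if encap is valid or not */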
3504 if (e) {
3505
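 /* Check that entry was not already attached to this flow */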
3506 if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
3507 err = -EOPNOTSUPP;
3508 goto out_err;
3509 }
3510
3511 mutex_unlock(&esw->offloads.encap_tbl_lock);
3512 wait_for_completion(&e->res_ready);
3513
3514
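 /* Protect against concurrent neigh update. */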
3515 mutex_lock(&esw->offloads.encap_tbl_lock);
3516 if (e->compl_result < 0) {
3517 err = -EREMOTEIO;
3518 goto out_err;
3519 }
3520 goto attach_flow;
3521 }
3522
3523 e = kzalloc(sizeof(*e), GFP_KERNEL);
3524 if (!e) {
3525 err = -ENOMEM;
3526 goto out_err;
3527 }
3528
3529 refcount_set(&e->refcnt, 1);
3530 init_completion(&e->res_ready);
3531
3532 tun_info = dup_tun_info(tun_info);
3533 if (!tun_info) {
3534 err = -ENOMEM;
3535 goto out_err_init;
3536 }
3537 e->tun_info = tun_info;
3538 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
3539 if (err)
3540 goto out_err_init;
3541
3542 INIT_LIST_HEAD(&e->flows);
3543 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
3544 mutex_unlock(&esw->offloads.encap_tbl_lock);
3545
3546 if (family == AF_INET)
3547 err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
3548 else if (family == AF_INET6)
3549 err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
3550
3551
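 /* Protect against concurrent neigh update. */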
3552 mutex_lock(&esw->offloads.encap_tbl_lock);
3553 complete_all(&e->res_ready);
3554 if (err) {
3555 e->compl_result = err;
3556 goto out_err;
3557 }
3558 e->compl_result = 1;
3559
3560attach_flow:
3561 flow->encaps[out_index].e = e;
3562 list_add(&flow->encaps[out_index].list, &e->flows);
3563 flow->encaps[out_index].index = out_index;
3564 *encap_dev = e->out_dev;
3565 if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
3566 attr->dests[out_index].pkt_reformat = e->pkt_reformat;
3567 attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
3568 *encap_valid = true;
3569 } else {
3570 *encap_valid = false;
3571 }
3572 mutex_unlock(&esw->offloads.encap_tbl_lock);
3573
3574 return err;
3575
3576out_err:
3577 mutex_unlock(&esw->offloads.encap_tbl_lock);
3578 if (e)
3579 mlx5e_encap_put(priv, e);
3580 return err;
3581
3582out_err_init:
3583 mutex_unlock(&esw->offloads.encap_tbl_lock);
3584 kfree(tun_info);
3585 kfree(e);
3586 return err;
3587}
3588
3589static int mlx5e_attach_decap(struct mlx5e_priv *priv,
3590 struct mlx5e_tc_flow *flow,
3591 struct netlink_ext_ack *extack)
3592{
3593 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3594 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3595 struct mlx5e_tc_flow_parse_attr *parse_attr;
3596 struct mlx5e_decap_entry *d;
3597 struct mlx5e_decap_key key;
3598 uintptr_t hash_key;
3599 int err = 0;
3600
3601 parse_attr = attr->parse_attr;
3602 if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
3603 NL_SET_ERR_MSG_MOD(extack,
3604 "encap header larger than max supported");
3605 return -EOPNOTSUPP;
3606 }
3607
3608 key.key = parse_attr->eth;
3609 hash_key = hash_decap_info(&key);
3610 mutex_lock(&esw->offloads.decap_tbl_lock);
3611 d = mlx5e_decap_get(priv, &key, hash_key);
3612 if (d) {
3613 mutex_unlock(&esw->offloads.decap_tbl_lock);
3614 wait_for_completion(&d->res_ready);
3615 mutex_lock(&esw->offloads.decap_tbl_lock);
3616 if (d->compl_result) {
3617 err = -EREMOTEIO;
3618 goto out_free;
3619 }
3620 goto found;
3621 }
3622
3623 d = kzalloc(sizeof(*d), GFP_KERNEL);
3624 if (!d) {
3625 err = -ENOMEM;
3626 goto out_err;
3627 }
3628
3629 d->key = key;
3630 refcount_set(&d->refcnt, 1);
3631 init_completion(&d->res_ready);
3632 INIT_LIST_HEAD(&d->flows);
3633 hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
3634 mutex_unlock(&esw->offloads.decap_tbl_lock);
3635
3636 d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
3637 MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
3638 sizeof(parse_attr->eth),
3639 &parse_attr->eth,
3640 MLX5_FLOW_NAMESPACE_FDB);
3641 if (IS_ERR(d->pkt_reformat)) {
3642 err = PTR_ERR(d->pkt_reformat);
3643 d->compl_result = err;
3644 }
3645 mutex_lock(&esw->offloads.decap_tbl_lock);
3646 complete_all(&d->res_ready);
3647 if (err)
3648 goto out_free;
3649
3650found:
3651 flow->decap_reformat = d;
3652 attr->decap_pkt_reformat = d->pkt_reformat;
3653 list_add(&flow->l3_to_l2_reformat, &d->flows);
3654 mutex_unlock(&esw->offloads.decap_tbl_lock);
3655 return 0;
3656
3657out_free:
3658 mutex_unlock(&esw->offloads.decap_tbl_lock);
3659 mlx5e_decap_put(priv, d);
3660 return err;
3661
3662out_err:
3663 mutex_unlock(&esw->offloads.decap_tbl_lock);
3664 return err;
3665}
3666
3667static int parse_tc_vlan_action(struct mlx5e_priv *priv,
3668 const struct flow_action_entry *act,
3669 struct mlx5_esw_flow_attr *attr,
3670 u32 *action)
3671{
3672 u8 vlan_idx = attr->total_vlan;
3673
3674 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
3675 return -EOPNOTSUPP;
3676
3677 switch (act->id) {
3678 case FLOW_ACTION_VLAN_POP:
3679 if (vlan_idx) {
3680 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3681 MLX5_FS_VLAN_DEPTH))
3682 return -EOPNOTSUPP;
3683
3684 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3685 } else {
3686 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3687 }
3688 break;
3689 case FLOW_ACTION_VLAN_PUSH:
3690 attr->vlan_vid[vlan_idx] = act->vlan.vid;
3691 attr->vlan_prio[vlan_idx] = act->vlan.prio;
3692 attr->vlan_proto[vlan_idx] = act->vlan.proto;
3693 if (!attr->vlan_proto[vlan_idx])
3694 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3695
3696 if (vlan_idx) {
3697 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3698 MLX5_FS_VLAN_DEPTH))
3699 return -EOPNOTSUPP;
3700
3701 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3702 } else {
3703 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
3704 (act->vlan.proto != htons(ETH_P_8021Q) ||
3705 act->vlan.prio))
3706 return -EOPNOTSUPP;
3707
3708 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
3709 }
3710 break;
3711 default:
3712 return -EINVAL;
3713 }
3714
3715 attr->total_vlan = vlan_idx + 1;
3716
3717 return 0;
3718}
3719
3720static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
3721 struct net_device *out_dev)
3722{
3723 struct net_device *fdb_out_dev = out_dev;
3724 struct net_device *uplink_upper;
3725
3726 rcu_read_lock();
3727 uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
3728 if (uplink_upper && netif_is_lag_master(uplink_upper) &&
3729 uplink_upper == out_dev) {
3730 fdb_out_dev = uplink_dev;
3731 } else if (netif_is_lag_master(out_dev)) {
3732 fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
3733 if (fdb_out_dev &&
3734 (!mlx5e_eswitch_rep(fdb_out_dev) ||
3735 !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
3736 fdb_out_dev = NULL;
3737 }
3738 rcu_read_unlock();
3739 return fdb_out_dev;
3740}
3741
3742static int add_vlan_push_action(struct mlx5e_priv *priv,
3743 struct mlx5_esw_flow_attr *attr,
3744 struct net_device **out_dev,
3745 u32 *action)
3746{
3747 struct net_device *vlan_dev = *out_dev;
3748 struct flow_action_entry vlan_act = {
3749 .id = FLOW_ACTION_VLAN_PUSH,
3750 .vlan.vid = vlan_dev_vlan_id(vlan_dev),
3751 .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3752 .vlan.prio = 0,
3753 };
3754 int err;
3755
3756 err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3757 if (err)
3758 return err;
3759
3760 *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3761 dev_get_iflink(vlan_dev));
3762 if (is_vlan_dev(*out_dev))
3763 err = add_vlan_push_action(priv, attr, out_dev, action);
3764
3765 return err;
3766}
3767
3768static int add_vlan_pop_action(struct mlx5e_priv *priv,
3769 struct mlx5_esw_flow_attr *attr,
3770 u32 *action)
3771{
3772 struct flow_action_entry vlan_act = {
3773 .id = FLOW_ACTION_VLAN_POP,
3774 };
3775 int nest_level, err = 0;
3776
3777 nest_level = attr->parse_attr->filter_dev->lower_level -
3778 priv->netdev->lower_level;
3779 while (nest_level--) {
3780 err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3781 if (err)
3782 return err;
3783 }
3784
3785 return err;
3786}
3787
3788static bool same_hw_reps(struct mlx5e_priv *priv,
3789 struct net_device *peer_netdev)
3790{
3791 struct mlx5e_priv *peer_priv;
3792
3793 peer_priv = netdev_priv(peer_netdev);
3794
3795 return mlx5e_eswitch_rep(priv->netdev) &&
3796 mlx5e_eswitch_rep(peer_netdev) &&
3797 same_hw_devs(priv, peer_priv);
3798}
3799
3800static bool is_lag_dev(struct mlx5e_priv *priv,
3801 struct net_device *peer_netdev)
3802{
3803 return ((mlx5_lag_is_sriov(priv->mdev) ||
3804 mlx5_lag_is_multipath(priv->mdev)) &&
3805 same_hw_reps(priv, peer_netdev));
3806}
3807
3808bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3809 struct net_device *out_dev)
3810{
3811 if (is_merged_eswitch_vfs(priv, out_dev))
3812 return true;
3813
3814 if (is_lag_dev(priv, out_dev))
3815 return true;
3816
3817 return mlx5e_eswitch_rep(out_dev) &&
3818 same_port_devs(priv, netdev_priv(out_dev));
3819}
3820
3821static bool is_duplicated_output_device(struct net_device *dev,
3822 struct net_device *out_dev,
3823 int *ifindexes, int if_count,
3824 struct netlink_ext_ack *extack)
3825{
3826 int i;
3827
3828 for (i = 0; i < if_count; i++) {
3829 if (ifindexes[i] == out_dev->ifindex) {
3830 NL_SET_ERR_MSG_MOD(extack,
3831 "can't duplicate output to same device");
3832 netdev_err(dev, "can't duplicate output to same device: %s\n",
3833 out_dev->name);
3834 return true;
3835 }
3836 }
3837
3838 return false;
3839}
3840
3841static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
3842 struct mlx5e_tc_flow *flow,
3843 const struct flow_action_entry *act,
3844 u32 actions,
3845 struct netlink_ext_ack *extack)
3846{
3847 u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
3848 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3849 bool ft_flow = mlx5e_is_ft_flow(flow);
3850 u32 dest_chain = act->chain_index;
3851
3852 if (ft_flow) {
3853 NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
3854 return -EOPNOTSUPP;
3855 }
3856
3857 if (!mlx5_esw_chains_backwards_supported(esw) &&
3858 dest_chain <= attr->chain) {
3859 NL_SET_ERR_MSG_MOD(extack,
3860 "Goto lower numbered chain isn't supported");
3861 return -EOPNOTSUPP;
3862 }
3863 if (dest_chain > max_chain) {
3864 NL_SET_ERR_MSG_MOD(extack,
3865 "Requested destination chain is out of supported range");
3866 return -EOPNOTSUPP;
3867 }
3868
3869 if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
3870 MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
3871 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
3872 NL_SET_ERR_MSG_MOD(extack,
3873 "Goto chain is not allowed if action has reformat or decap");
3874 return -EOPNOTSUPP;
3875 }
3876
3877 return 0;
3878}
3879
3880static int verify_uplink_forwarding(struct mlx5e_priv *priv,
3881 struct mlx5e_tc_flow *flow,
3882 struct net_device *out_dev,
3883 struct netlink_ext_ack *extack)
3884{
3885 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3886 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3887 struct mlx5e_rep_priv *rep_priv;
3888
3889
3890
3891
3892
3893
3894
3895
3896
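 /* Forwarding non encapsulated traffic between
 * uplink ports is allowed only if
 * termination_table_raw_traffic cap is set.
 *
 * Input vport was stored in esw_attr->in_rep.
 * In LAG case, *priv* is the private data of
 * uplink which may be not the input vport.
 */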
3897 rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
3898
3899 if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
3900 mlx5e_eswitch_uplink_rep(out_dev)))
3901 return 0;
3902
3903 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
3904 termination_table_raw_traffic)) {
3905 NL_SET_ERR_MSG_MOD(extack,
3906 "devices are both uplink, can't offload forwarding");
3907 pr_err("devices %s %s are both uplink, can't offload forwarding\n",
3908 priv->netdev->name, out_dev->name);
3909 return -EOPNOTSUPP;
3910 } else if (out_dev != rep_priv->netdev) {
3911 NL_SET_ERR_MSG_MOD(extack,
3912 "devices are not the same uplink, can't offload forwarding");
3913 pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
3914 priv->netdev->name, out_dev->name);
3915 return -EOPNOTSUPP;
3916 }
3917 return 0;
3918}
3919
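/* Translate the flower action list of an eswitch (FDB) rule into
 * MLX5_FLOW_CONTEXT_ACTION_* bits and mlx5_esw_flow_attr state: output
 * vports, vlan push/pop, pedit, encap/decap and goto-chain handling.
 */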
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack,
				struct net_device *filter_dev)
{
	struct pedit_headers_action hdrs[2] = {};
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct ip_tunnel_info *info = NULL;
	int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
	bool ft_flow = mlx5e_is_ft_flow(flow);
	const struct flow_action_entry *act;
	bool encap = false, decap = false;
	u32 action = attr->action;
	int err, i, if_count = 0;
	bool mpls_push = false;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	if (!flow_action_hw_stats_check(flow_action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MPLS_PUSH:
			if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
							reformat_l2_to_l3_tunnel) ||
			    act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls push is supported only for mpls_uc protocol");
				return -EOPNOTSUPP;
			}
			mpls_push = true;
			break;
		case FLOW_ACTION_MPLS_POP:
			/* we only support mpls pop if it is the first action
			 * and the filter net device is bareudp. Subsequent
			 * actions can be pedit and the last can be mirred
			 * egress redirect.
			 */
			if (i) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls pop supported only as first action");
				return -EOPNOTSUPP;
			}
			if (!netif_is_bareudp(filter_dev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls pop supported only on bareudp devices");
				return -EOPNOTSUPP;
			}

			parse_attr->eth.h_proto = act->mpls_pop.proto;
			action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_flag_set(flow, L3_TO_L2_DECAP);
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr, hdrs, flow, extack);
			if (err)
				return err;

			if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
				action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
				attr->split_count = attr->out_count;
			}
			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags, extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED: {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = act->dev;
			if (!out_dev) {
				/* out_dev is NULL when filters with
				 * non-existing mirred device are replayed to
				 * the driver.
				 */
				return -EINVAL;
			}

			if (mpls_push && !netif_is_bareudp(out_dev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls is supported only through a bareudp device");
				return -EOPNOTSUPP;
			}

			if (ft_flow && out_dev == priv->netdev) {
				/* Ignore forward to self rules generated
				 * by adding both mlx5 devs to the flow table
				 * common lag. In case of mlx5 lag the extra
				 * rule will be offloaded to bond device.
				 */
				return -EOPNOTSUPP;
			}

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				netdev_warn(priv->netdev,
					    "can't support more than %d output ports, can't offload forwarding\n",
					    attr->out_count);
				return -EOPNOTSUPP;
			}

			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			if (encap) {
				parse_attr->mirred_ifindex[attr->out_count] =
					out_dev->ifindex;
				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
				if (!parse_attr->tun_info[attr->out_count])
					return -ENOMEM;
				encap = false;
				attr->dests[attr->out_count].flags |=
					MLX5_ESW_DEST_ENCAP;
				attr->out_count++;
				/* attr->dests[].rep is resolved when we
				 * handle encap
				 */
			} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);

				if (is_duplicated_output_device(priv->netdev,
								out_dev,
								ifindexes,
								if_count,
								extack))
					return -EOPNOTSUPP;

				ifindexes[if_count] = out_dev->ifindex;
				if_count++;

				out_dev = get_fdb_out_dev(uplink_dev, out_dev);
				if (!out_dev)
					return -ENODEV;

				if (is_vlan_dev(out_dev)) {
					err = add_vlan_push_action(priv, attr,
								   &out_dev,
								   &action);
					if (err)
						return err;
				}

				if (is_vlan_dev(parse_attr->filter_dev)) {
					err = add_vlan_pop_action(priv, attr,
								  &action);
					if (err)
						return err;
				}

				err = verify_uplink_forwarding(priv, flow, out_dev, extack);
				if (err)
					return err;

				if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
					NL_SET_ERR_MSG_MOD(extack,
							   "devices are not on same switch HW, can't offload forwarding");
					return -EOPNOTSUPP;
				}

				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->dests[attr->out_count].rep = rpriv->rep;
				attr->dests[attr->out_count].mdev = out_priv->mdev;
				attr->out_count++;
			} else if (parse_attr->filter_dev != priv->netdev) {
				/* All mlx5 devices are called to configure
				 * high level device filters. Therefore, the
				 * *attempt* to install a filter on invalid
				 * eswitch should not trigger an explicit error
				 */
				return -EINVAL;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				netdev_warn(priv->netdev,
					    "devices %s %s not on same switch HW, can't offload forwarding\n",
					    priv->netdev->name,
					    out_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			info = act->tunnel;
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;

			break;
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
			if (act->id == FLOW_ACTION_VLAN_PUSH &&
			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
				/* Replace vlan pop+push with vlan modify */
				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
				err = add_vlan_rewrite_action(priv,
							      MLX5_FLOW_NAMESPACE_FDB,
							      act, parse_attr, hdrs,
							      &action, extack);
			} else {
				err = parse_tc_vlan_action(priv, act, attr, &action);
			}
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_FDB,
						      act, parse_attr, hdrs,
						      &action, extack);
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			decap = true;
			break;
		case FLOW_ACTION_GOTO:
			err = mlx5_validate_goto_chain(esw, flow, act, action,
						       extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = act->chain_index;
			break;
		case FLOW_ACTION_CT:
			err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
			if (err)
				return err;

			flow_flag_set(flow, CT);
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
	}

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
		/* For prio tag mode, replace vlan pop with rewrite vlan prio
		 * tag rewrite.
		 */
		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
						       &action, extack);
		if (err)
			return err;
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
					    parse_attr, hdrs, &action, extack);
		if (err)
			return err;

		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag. we might have set split_count either by pedit or
		 * pop/push. if there is no pop/push either, reset it too.
		 */
		if (parse_attr->mod_hdr_acts.num_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
				attr->split_count = 0;
		}
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->dest_chain) {
		if (decap) {
			/* It can be supported if we'll create a mapping for
			 * the tunnel device only (without tunnel), and set
			 * this tunnel id with this decap flow.
			 *
			 * On restore (miss), we'll just set this saved tunnel
			 * device.
			 */

			NL_SET_ERR_MSG(extack,
				       "Decap with goto isn't supported");
			netdev_warn(priv->netdev,
				    "Decap with goto isn't supported");
			return -EOPNOTSUPP;
		}

		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Mirroring goto chain rules isn't supported");
			return -EOPNOTSUPP;
		}
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	if (!(attr->action &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Rule must have at least one forward/drop action");
		return -EOPNOTSUPP;
	}

	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

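/* Translate MLX5_TC_FLAG() bits of the offload request into per-flow
 * MLX5E_TC_FLOW_FLAG_* bits.
 */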
static void get_flags(int flags, unsigned long *flow_flags)
{
	unsigned long __flow_flags = 0;

	if (flags & MLX5_TC_FLAG(INGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
	if (flags & MLX5_TC_FLAG(EGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);

	*flow_flags = __flow_flags;
}

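/* Offloaded TC flows are kept in an rhashtable keyed by the flower cookie. */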
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
				    unsigned long flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->uplink_priv.tc_ht;
	} else /* NIC offload */
		return &priv->fs.tc.ht;
}

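/* With paired eswitches (SRIOV LAG or multipath), a rule added on a rep's
 * ingress, or one that encapsulates, must also be installed on the peer
 * eswitch so traffic is handled no matter which port it enters through.
 */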
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
		flow_flag_test(flow, INGRESS);
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
						MLX5_DEVCOM_ESW_OFFLOADS);

	if (!esw_paired)
		return false;

	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
	     mlx5_lag_is_multipath(attr->in_mdev)) &&
	    (is_rep_ingress || act_is_encap))
		return true;

	return false;
}

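/* Allocate a flow together with its trailing esw/nic attr, plus its
 * parse_attr. On success the caller holds the single initial reference.
 */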
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct flow_cls_offload *f, unsigned long flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int out_index, err;

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;
	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		INIT_LIST_HEAD(&flow->encaps[out_index].list);
	INIT_LIST_HEAD(&flow->hairpin);
	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
	refcount_set(&flow->refcnt, 1);
	init_completion(&flow->init_done);

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}

static void
mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct flow_cls_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	esw_attr->parse_attr = parse_attr;
	esw_attr->chain = f->common.chain_index;
	esw_attr->prio = f->common.prio;

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}

static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->esw_attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	/* actions validation depends on parsing the ct matches first */
	err = mlx5_tc_ct_match_add(priv, &parse_attr->spec, f,
				   &flow->esw_attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}

static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is assigned of which the packet originated from.
	 * So packets redirected to uplink use the same mdev of the
	 * original flow and packets redirected from uplink use the
	 * peer mdev.
	 */
	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->esw_attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 flow->esw_attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}

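/* Add an FDB flow on this eswitch and, if is_peer_flow_needed(), duplicate
 * it on the paired eswitch as well.
 */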
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			goto out;
		}
	}

	*__flow = flow;

	return 0;

out:
	return err;
}

static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	/* multi chain flows are not supported for NIC rules */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	kvfree(parse_attr);
	*__flow = flow;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
	kvfree(parse_attr);
out:
	return err;
}

static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct flow_cls_offload *f,
		  unsigned long flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}

static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* Offloaded flow rule is allowed to duplicate on non-uplink
	 * representors sharing a tc block as lower devices of the same
	 * LAG (bond) device.
	 */
	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}

int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* Same flow rule offloaded to non-uplink representor
		 * sharing tc block, just return 0.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (flow)
		goto out;

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return err;
}

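/* A cookie lookup is only valid if the INGRESS/EGRESS flags of the request
 * match the direction the flow was originally added with.
 */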
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
		flow_flag_test(flow, EGRESS) == dir_egress;
}

int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED
	 * flag set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	return 0;

errout:
	rcu_read_unlock();
	return err;
}

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}

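/* Apply a matchall police rate as an ingress rate limit on the rep's vport.
 * A rate of 0 removes the limit.
 */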
static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u16 vport_num;
	u32 rate_mbps;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest mbit/secs.
	 * mbit means million bits.
	 * Moreover, if rate is non zero we choose to configure to a minimum of
	 * 1 mbit/sec.
	 */
	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}

static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

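/* Offload a matchall classifier carrying a single police action, e.g.
 * (hypothetical example; note that prio 1 is required):
 *   tc filter add dev $REP ingress prio 1 protocol all matchall \
 *       action police rate 100mbit burst 16k drop
 */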
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = ma->common.extack;

	if (!mlx5_esw_qos_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
		return -EOPNOTSUPP;
	}

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}

void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}

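/* On teardown of a peer device on the same HW, flag the hairpin pairs
 * destined to its vhca id as peer_gone.
 */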
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;

		mlx5e_hairpin_put(priv, hpe);
	}
}

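/* Netdevice notifier: react to NETDEV_UNREGISTER of another mlx5e device
 * and update hairpin entries pointing at the vanishing peer.
 */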
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}

	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_destroy(&tc->ht);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);
}

int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *priv;
	struct mapping_ctx *mapping;
	int err;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);

	err = mlx5_tc_ct_init(uplink_priv);
	if (err)
		goto err_ct;

	mapping = mapping_create(sizeof(struct tunnel_match_key),
				 TUNNEL_INFO_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		goto err_ht_init;

	return err;

err_ht_init:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5_tc_ct_clean(uplink_priv);
err_ct:
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	return err;
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	struct mlx5_rep_uplink_priv *uplink_priv;

	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5_tc_ct_clean(uplink_priv);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}

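/* Retry flows that were left unready (e.g. -ENETUNREACH under multipath,
 * see add_unready_flow()); drop them from the unready list on success.
 */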
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}

static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}
