// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies. */

#include <net/dst_metadata.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include "tc.h"
#include "neigh.h"
#include "en_rep.h"
#include "eswitch.h"
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mapping.h"
#include "en/tc_tun.h"
#include "lib/port_tun.h"
#include "esw/sample.h"

struct mlx5e_rep_indr_block_priv {
	struct net_device *netdev;
	struct mlx5e_rep_priv *rpriv;

	struct list_head list;
};

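/* Attach an encap entry to the hash entry tracking its tunnel neighbour.
 * Takes a tunnel-entropy reference for the encap reformat type and creates
 * the neigh hash entry if the neighbour is not tracked yet.
 */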
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e,
				 struct mlx5e_neigh *m_neigh,
				 struct net_device *neigh_dev)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
	if (err)
		return err;

	mutex_lock(&rpriv->neigh_update.encap_lock);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, m_neigh, neigh_dev, &nhe);
		if (err) {
			mutex_unlock(&rpriv->neigh_update.encap_lock);
			mlx5_tun_entropy_refcount_dec(tun_entropy,
						      e->reformat_type);
			return err;
		}
	}

	e->nhe = nhe;
	spin_lock(&nhe->encap_list_lock);
	list_add_rcu(&e->encap_list, &nhe->encap_list);
	spin_unlock(&nhe->encap_list_lock);

	mutex_unlock(&rpriv->neigh_update.encap_lock);

	return 0;
}

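/* Detach an encap entry from its neigh hash entry and release the
 * tunnel-entropy reference taken by mlx5e_rep_encap_entry_attach().
 */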
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;

	if (!e->nhe)
		return;

	spin_lock(&e->nhe->encap_list_lock);
	list_del_rcu(&e->encap_list);
	spin_unlock(&e->nhe->encap_list_lock);

	mlx5e_rep_neigh_entry_release(e->nhe);
	e->nhe = NULL;
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

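/* Re-offload or remove the flows using this encap entry after a neighbour
 * update: stale encaps are torn down, and newly resolved neighbours get the
 * encap header refreshed before the flows are offloaded again.
 */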
void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
			    struct mlx5e_encap_entry *e,
			    bool neigh_connected,
			    unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool encap_connected;
	LIST_HEAD(flow_list);

	ASSERT_RTNL();

	mutex_lock(&esw->offloads.encap_tbl_lock);
	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
	if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
		goto unlock;

	mlx5e_take_all_encap_flows(e, &flow_list);

	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
		mlx5e_tc_encap_flows_del(priv, e, &flow_list);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		struct net_device *route_dev;

		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);

		/* Update the encap source mac, in case that we delete
		 * the flows when encap source mac changed.
		 */
		route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
		if (route_dev)
			ether_addr_copy(eth->h_source, route_dev->dev_addr);

		mlx5e_tc_encap_flows_add(priv, e, &flow_list);
	}
unlock:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	mlx5e_put_flow_list(priv, &flow_list);
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
			      struct flow_cls_offload *cls_flower, int flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
				    struct tc_cls_matchall_offload *ma)
{
	switch (ma->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlx5e_tc_configure_matchall(priv, ma);
	case TC_CLSMATCHALL_DESTROY:
		return mlx5e_tc_delete_matchall(priv, ma);
	case TC_CLSMATCHALL_STATS:
		mlx5e_tc_stats_matchall(priv, ma);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
	case TC_SETUP_CLSMATCHALL:
		return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

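/* Block callback used for TC_SETUP_FT: FT rules are funnelled through the
 * regular flower offload path, but on the driver's reserved FT chain.
 */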
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct flow_cls_offload tmp, *f = type_data;
	struct mlx5e_priv *priv = cb_priv;
	struct mlx5_eswitch *esw;
	unsigned long flags;
	int err;

	flags = MLX5_TC_FLAG(INGRESS) |
		MLX5_TC_FLAG(ESW_OFFLOAD) |
		MLX5_TC_FLAG(FT_OFFLOAD);
	esw = priv->mdev->priv.eswitch;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		memcpy(&tmp, f, sizeof(*f));

		if (!mlx5_chains_prios_supported(esw_chains(esw)))
			return -EOPNOTSUPP;

		/* Re-use the tc offload path by moving the FT rule to the
		 * driver-reserved FT chain. Only chain 0 is accepted for FT
		 * offload and the prio must be within the supported prio
		 * range; the prio is then bumped by one for the reserved
		 * chain.
		 */
		if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))
			return -EOPNOTSUPP;
		if (tmp.common.chain_index != 0)
			return -EOPNOTSUPP;

		tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
		tmp.common.prio++;
		err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
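
/* ndo_setup_tc entry point for representors: binds the tc and FT block
 * callback lists above to the offload block.
 */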
int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
		       void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct flow_block_offload *f = type_data;

	f->unlocked_driver_cb = true;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_tc_cb_list,
						  mlx5e_rep_setup_tc_cb,
						  priv, priv, true);
	case TC_SETUP_FT:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_ft_cb_list,
						  mlx5e_rep_setup_ft_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	int err;

	mutex_init(&uplink_priv->unready_flows_lock);
	INIT_LIST_HEAD(&uplink_priv->unready_flows);

	/* init shared tc flow table */
	err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
	return err;
}

void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
	/* delete shared tc flow table */
	mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
	mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}

void mlx5e_rep_tc_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
		  mlx5e_tc_reoffload_flows_work);
}

void mlx5e_rep_tc_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
}

int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

	return NOTIFY_OK;
}

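/* Find the indirect block state registered for a tunnel or VLAN netdev, if
 * any. The list is protected by RTNL.
 */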
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
		       struct flow_cls_offload *flower,
		       struct mlx5e_rep_indr_block_priv *indr_priv,
		       unsigned long flags)
{
	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
	int err = 0;

	if (!netif_device_present(indr_priv->rpriv->netdev))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		err = mlx5e_configure_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_DESTROY:
		err = mlx5e_delete_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_STATS:
		err = mlx5e_stats_flower(netdev, priv, flower, flags);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
				      void *type_data, void *indr_priv)
{
	unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
					      flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
				      void *type_data, void *indr_priv)
{
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;
	struct flow_cls_offload *f = type_data;
	struct flow_cls_offload tmp;
	struct mlx5e_priv *mpriv;
	struct mlx5_eswitch *esw;
	unsigned long flags;
	int err;

	mpriv = netdev_priv(priv->rpriv->netdev);
	esw = mpriv->mdev->priv.eswitch;

	flags = MLX5_TC_FLAG(EGRESS) |
		MLX5_TC_FLAG(ESW_OFFLOAD) |
		MLX5_TC_FLAG(FT_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		memcpy(&tmp, f, sizeof(*f));

		/* Same handling as mlx5e_rep_setup_ft_cb(): accept only chain
		 * 0 within the supported prio range, move the rule to the
		 * driver-reserved FT chain and bump its prio by one.
		 */
		if (!mlx5_chains_prios_supported(esw_chains(esw)) ||
		    tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)) ||
		    tmp.common.chain_index)
			return -EOPNOTSUPP;

		tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
		tmp.common.prio++;
		err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_indr_block_unbind(void *cb_priv)
{
	struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}

static LIST_HEAD(mlx5e_block_cb_list);

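/* Bind or unbind an indirect block (tunnel or VLAN upper device) to the
 * representor's flower/FT callbacks.
 */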
static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
			   struct mlx5e_rep_priv *rpriv,
			   struct flow_block_offload *f,
			   flow_setup_cb_t *setup_cb,
			   void *data,
			   void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5e_rep_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
	    !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->unlocked_driver_cb = true;
	f->driver_block_list = &mlx5e_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->rpriv = rpriv;
		list_add(&indr_priv->list,
			 &rpriv->uplink_priv.tc_indr_block_priv_list);

		block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
						    mlx5e_rep_indr_block_unbind,
						    f, netdev, sch, data, rpriv,
						    cleanup);
		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

		return 0;
	case FLOW_BLOCK_UNBIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

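/* Entry point for indirect block offload requests coming from other devices
 * (e.g. tunnel devices); dispatches to the tc or FT block setup above.
 */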
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
			    enum tc_setup_type type, void *type_data,
			    void *data,
			    void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
						  mlx5e_rep_indr_setup_tc_cb,
						  data, cleanup);
	case TC_SETUP_FT:
		return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
						  mlx5e_rep_indr_setup_ft_cb,
						  data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;

	/* init indirect block notifications */
	INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);

	return flow_indr_dev_register(mlx5e_rep_indr_setup_cb, rpriv);
}

void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
{
	flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
				 mlx5e_rep_indr_block_unbind);
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
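/* Rebuild the tunnel metadata dst for a packet that missed in hardware after
 * decap, using the tunnel mapping ids restored from the CQE.
 */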
static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
				 struct mlx5e_tc_update_priv *tc_priv,
				 u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct tunnel_match_enc_opts enc_opts = {};
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct metadata_dst *tun_dst;
	struct tunnel_match_key key;
	u32 tun_id, enc_opts_id;
	struct net_device *dev;
	int err;

	enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
	tun_id = tunnel_id >> ENC_OPTS_BITS;

	if (!tun_id)
		return true;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
	if (err) {
		WARN_ON_ONCE(true);
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel for tun_id: %d, err: %d\n",
			   tun_id, err);
		return false;
	}

	if (enc_opts_id) {
		err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
				   enc_opts_id, &enc_opts);
		if (err) {
			netdev_dbg(priv->netdev,
				   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
				   enc_opts_id, err);
			return false;
		}
	}

	if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
					   key.enc_ip.tos, key.enc_ip.ttl,
					   key.enc_tp.dst, TUNNEL_KEY,
					   key32_to_tunnel_id(key.enc_key_id.keyid),
					   enc_opts.key.len);
	} else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
					     key.enc_ip.tos, key.enc_ip.ttl,
					     key.enc_tp.dst, 0, TUNNEL_KEY,
					     key32_to_tunnel_id(key.enc_key_id.keyid),
					     enc_opts.key.len);
	} else {
		netdev_dbg(priv->netdev,
			   "Couldn't restore tunnel, unsupported addr_type: %d\n",
			   key.enc_control.addr_type);
		return false;
	}

	if (!tun_dst) {
		netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
		return false;
	}

	tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;

	if (enc_opts.key.len)
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					enc_opts.key.data,
					enc_opts.key.len,
					enc_opts.key.dst_opt_type);

	skb_dst_set(skb, (struct dst_entry *)tun_dst);
	dev = dev_get_by_index(&init_net, key.filter_ifindex);
	if (!dev) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel device with ifindex: %d\n",
			   key.filter_ifindex);
		return false;
	}

	/* Set tun_dev so we do dev_put() after datapath */
	tc_priv->tun_dev = dev;

	skb->dev = dev;

	return true;
}

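/* Restore the tc chain, conntrack state and tunnel info of a packet that
 * continues processing in software.
 */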
static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
			      struct mlx5e_tc_update_priv *tc_priv)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;

	if (chain) {
		struct mlx5_rep_uplink_priv *uplink_priv;
		struct mlx5e_rep_priv *uplink_rpriv;
		struct tc_skb_ext *tc_skb_ext;
		struct mlx5_eswitch *esw;
		u32 zone_restore_id;

		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (!tc_skb_ext) {
			WARN_ON(1);
			return false;
		}
		tc_skb_ext->chain = chain;
		zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
		esw = priv->mdev->priv.eswitch;
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
					      zone_restore_id))
			return false;
	}
	return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
}
#endif

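/* Called from the representor RX path: look up the object mapped to reg_c0
 * in the CQE and restore chain/tunnel state (or hand the skb to sampling)
 * before the packet is passed up the stack.
 */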
bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
			     struct sk_buff *skb,
			     struct mlx5e_tc_update_priv *tc_priv)
{
	struct mlx5_mapped_obj mapped_obj;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u32 reg_c0, reg_c1;
	int err;

	reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
	if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
		return true;

	/* If reg_c0 is not equal to the default flow tag then skb->mark
	 * is not supported and must be reset back to 0.
	 */
	skb->mark = 0;

	reg_c1 = be32_to_cpu(cqe->ft_metadata);

	priv = netdev_priv(skb->dev);
	esw = priv->mdev->priv.eswitch;
	err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find mapped object for reg_c0: %d, err: %d\n",
			   reg_c0, err);
		return false;
	}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN)
		return mlx5e_restore_skb(skb, mapped_obj.chain, reg_c1, tc_priv);
#endif
#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
		mlx5_esw_sample_skb(skb, &mapped_obj);
		return false;
	}
#endif
	if (mapped_obj.type != MLX5_MAPPED_OBJ_SAMPLE &&
	    mapped_obj.type != MLX5_MAPPED_OBJ_CHAIN) {
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}

	return true;
}

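/* Release the tunnel device reference taken while restoring tunnel info. */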
void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
{
	if (tc_priv->tun_dev)
		dev_put(tc_priv->tun_dev);
}