1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/skbuff.h>
35#include <net/devlink.h>
36#include <net/pkt_cls.h>
37
38#include "cmsg.h"
39#include "main.h"
40#include "../nfpcore/nfp_cpp.h"
41#include "../nfpcore/nfp_nsp.h"
42#include "../nfp_app.h"
43#include "../nfp_main.h"
44#include "../nfp_net.h"
45#include "../nfp_port.h"
46
47#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
48 (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
49 TCPHDR_PSH | TCPHDR_URG)
50
51#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
52 (FLOW_DIS_IS_FRAGMENT | \
53 FLOW_DIS_FIRST_FRAG)
54
55#define NFP_FLOWER_WHITELIST_DISSECTOR \
56 (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
57 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
58 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
59 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
60 BIT(FLOW_DISSECTOR_KEY_TCP) | \
61 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
62 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
63 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
64 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
65 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
66 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
67 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
68 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
69 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
70 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
71 BIT(FLOW_DISSECTOR_KEY_IP))
72
73#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
74 (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
75 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
76 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
77 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
78 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
79 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
80
81#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
82 (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
83 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
84 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
85
/* nfp_flower_xmit_flow() - Send a flow rule to the firmware.
 * @netdev:   repr netdev the rule is associated with.
 * @nfp_flow: flow payload holding rule metadata, key, mask and actions.
 * @mtype:    control message type (flow add or flow delete).
 *
 * Builds a control message containing the rule metadata followed by the
 * unmasked key, the mask and the action data, and queues it on the control
 * channel.
 *
 * Return: 0 on success, or -ENOMEM if the skb cannot be allocated.
 */
static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}
131
132static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
133{
134 return dissector_uses_key(f->dissector,
135 FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
136 dissector_uses_key(f->dissector,
137 FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
138 dissector_uses_key(f->dissector,
139 FLOW_DISSECTOR_KEY_PORTS) ||
140 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
141}
142
143static int
144nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
145 u32 *key_layer_two, int *key_size)
146{
147 if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
148 return -EOPNOTSUPP;
149
150 if (enc_opts->len > 0) {
151 *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
152 *key_size += sizeof(struct nfp_flower_geneve_options);
153 }
154
155 return 0;
156}
157
/* Inspect the flower match and work out which NFP key layers are needed
 * to represent it, the total key size, and the tunnel type (if any).
 * Results are written to @ret_key_ls and @tun_type.
 *
 * Return: 0 on success, -EOPNOTSUPP if the match cannot be offloaded.
 */
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tunnel dissector is used then the required set must be used. */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	/* Every rule carries at least metadata/TCI and input port. */
	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *flow_vlan;

		flow_vlan = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_VLAN,
						      flow->mask);
		/* VLAN PCP matching requires explicit firmware support. */
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    flow_vlan->vlan_priority)
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_enc_opts *enc_op = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);
		/* Tunnel matches are only offloaded on the egress path. */
		if (!egress)
			return -EOPNOTSUPP;

		/* Only exact-match IPv4 tunnels are supported. */
		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used by the
		 * NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R check above.
		 */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		/* Destination port must be an exact match. */
		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		if (dissector_uses_key(flow->dissector,
				       FLOW_DISSECTOR_KEY_ENC_OPTS)) {
			enc_op = skb_flow_dissector_target(flow->dissector,
							   FLOW_DISSECTOR_KEY_ENC_OPTS,
							   flow->key);
		}

		/* Identify the tunnel type from the destination UDP port. */
		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			/* VXLAN tunnels have no option match support. */
			if (enc_op)
				return -EOPNOTSUPP;
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op)
				break;
			/* GENEVE option matching needs its own feature bit. */
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
				return -EOPNOTSUPP;
			err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
							&key_size);
			if (err)
				return err;
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non-tunnel matches offloaded on the egress path. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in the IPv4 key space, thus
		 * we need to ensure we include a IPv4 key layer if we have
		 * not done so already.
		 */
		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key_ctl;

		key_ctl = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_CONTROL,
						    flow->key);

		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}
403
404static struct nfp_fl_payload *
405nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
406{
407 struct nfp_fl_payload *flow_pay;
408
409 flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
410 if (!flow_pay)
411 return NULL;
412
413 flow_pay->meta.key_len = key_layer->key_size;
414 flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
415 if (!flow_pay->unmasked_data)
416 goto err_free_flow;
417
418 flow_pay->meta.mask_len = key_layer->key_size;
419 flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
420 if (!flow_pay->mask_data)
421 goto err_free_unmasked;
422
423 flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
424 if (!flow_pay->action_data)
425 goto err_free_mask;
426
427 flow_pay->nfp_tun_ipv4_addr = 0;
428 flow_pay->meta.flags = 0;
429 flow_pay->ingress_offload = !egress;
430
431 return flow_pay;
432
433err_free_mask:
434 kfree(flow_pay->mask_data);
435err_free_unmasked:
436 kfree(flow_pay->unmasked_data);
437err_free_flow:
438 kfree(flow_pay);
439 return NULL;
440}
441
/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (flow_pay) {
		/* Ignore as duplicate if it has been added by different cb. */
		if (flow_pay->ingress_offload && egress)
			return 0;
		else
			return -EOPNOTSUPP;
	}

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer, egress);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	flow_pay->ingress_dev = egress ? NULL : netdev;

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay,
					flow_pay->ingress_dev);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_destroy_flow;

	port->tc_offload_cnt++;

	/* Key layout was only needed to build the flow; discard it. */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}
534
/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 * @egress:	Netdev is the egress dev.
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return egress ? 0 : -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

	/* Deliberate fall-through: the flow is freed on both the success
	 * and error paths, and err carries the result either way.
	 */
err_free_flow:
	port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	/* RCU-deferred free: readers may still hold the hashed entry. */
	kfree_rcu(nfp_flow, rcu);
	return err;
}
586
/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 * @egress:	Netdev is the egress dev.
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	u32 ctx_id;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return -EINVAL;

	/* Ingress-offloaded flows report their stats via the ingress
	 * callback only; egress requests for them are a no-op.
	 */
	if (nfp_flow->ingress_offload && egress)
		return 0;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	/* stats_lock protects the per-context counters against concurrent
	 * updates from the firmware stats handler.
	 */
	spin_lock_bh(&priv->stats_lock);
	tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
			      priv->stats[ctx_id].pkts,
			      priv->stats[ctx_id].used);

	/* Counters are reset after being reported to TC. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}
630
631static int
632nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
633 struct tc_cls_flower_offload *flower, bool egress)
634{
635 if (!eth_proto_is_802_3(flower->common.protocol))
636 return -EOPNOTSUPP;
637
638 switch (flower->command) {
639 case TC_CLSFLOWER_REPLACE:
640 return nfp_flower_add_offload(app, netdev, flower, egress);
641 case TC_CLSFLOWER_DESTROY:
642 return nfp_flower_del_offload(app, netdev, flower, egress);
643 case TC_CLSFLOWER_STATS:
644 return nfp_flower_get_stats(app, netdev, flower, egress);
645 default:
646 return -EOPNOTSUPP;
647 }
648}
649
650int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
651 void *cb_priv)
652{
653 struct nfp_repr *repr = cb_priv;
654
655 if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
656 return -EOPNOTSUPP;
657
658 switch (type) {
659 case TC_SETUP_CLSFLOWER:
660 return nfp_flower_repr_offload(repr->app, repr->netdev,
661 type_data, true);
662 default:
663 return -EOPNOTSUPP;
664 }
665}
666
667static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
668 void *type_data, void *cb_priv)
669{
670 struct nfp_repr *repr = cb_priv;
671
672 if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
673 return -EOPNOTSUPP;
674
675 switch (type) {
676 case TC_SETUP_CLSFLOWER:
677 return nfp_flower_repr_offload(repr->app, repr->netdev,
678 type_data, false);
679 default:
680 return -EOPNOTSUPP;
681 }
682}
683
684static int nfp_flower_setup_tc_block(struct net_device *netdev,
685 struct tc_block_offload *f)
686{
687 struct nfp_repr *repr = netdev_priv(netdev);
688
689 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
690 return -EOPNOTSUPP;
691
692 switch (f->command) {
693 case TC_BLOCK_BIND:
694 return tcf_block_cb_register(f->block,
695 nfp_flower_setup_tc_block_cb,
696 repr, repr, f->extack);
697 case TC_BLOCK_UNBIND:
698 tcf_block_cb_unregister(f->block,
699 nfp_flower_setup_tc_block_cb,
700 repr);
701 return 0;
702 default:
703 return -EOPNOTSUPP;
704 }
705}
706
707int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
708 enum tc_setup_type type, void *type_data)
709{
710 switch (type) {
711 case TC_SETUP_BLOCK:
712 return nfp_flower_setup_tc_block(netdev, type_data);
713 default:
714 return -EOPNOTSUPP;
715 }
716}
717