// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

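/* Aliasing the collected match fields as vals[] lets the merge code treat
 * them as a single bitmap, so one bitmap_andnot() can check whether every
 * bit matched by one sub-flow is matched or set by the other.
 */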
struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};

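/* Build a control message carrying the flow metadata, unmasked key, mask and
 * action data, and hand it to the firmware over the control channel.
 */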
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

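/* Helpers reporting whether a rule matches on fields above L2 or above L3;
 * used to reject matches that lack the prerequisite protocol keys.
 */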
static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

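/* Account for a geneve option match in the key layout, if options are used. */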
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size,
			  struct netlink_ext_ack *extack)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
		return -EOPNOTSUPP;
	}

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

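/* Select the tunnel type and key layers based on the UDP destination port. */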
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
			      struct flow_dissector_key_enc_opts *enc_op,
			      u32 *key_layer_two, u8 *key_layer, int *key_size,
			      struct nfp_flower_priv *priv,
			      enum nfp_flower_tun_type *tun_type,
			      struct netlink_ext_ack *extack)
{
	int err;

	switch (enc_ports->dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		*tun_type = NFP_FL_TUNNEL_VXLAN;
		*key_layer |= NFP_FLOWER_LAYER_VXLAN;
		*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

		if (enc_op) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
			return -EOPNOTSUPP;
		}
		break;
	case htons(GENEVE_UDP_PORT):
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
			return -EOPNOTSUPP;
		}
		*tun_type = NFP_FL_TUNNEL_GENEVE;
		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
		*key_size += sizeof(struct nfp_flower_ext_meta);
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
		*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

		if (!enc_op)
			break;
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
			return -EOPNOTSUPP;
		}
		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
						key_size, extack);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
		return -EOPNOTSUPP;
	}

	return 0;
}

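/* Walk the dissector keys used by the rule and work out which firmware key
 * layers and what total key size are needed to express the match.
 */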
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct flow_cls_offload *flow,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL };
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}
		if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
			return -EOPNOTSUPP;
		}

		/* These fields are already verified as used. */
		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
		if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
			/* Check if GRE, which has no enc_ports. */
			if (netif_is_gretap(netdev)) {
				*tun_type = NFP_FL_TUNNEL_GRE;
				key_layer |= NFP_FLOWER_LAYER_EXT_META;
				key_size += sizeof(struct nfp_flower_ext_meta);
				key_layer_two |= NFP_FLOWER_LAYER2_GRE;
				key_size +=
					sizeof(struct nfp_flower_ipv4_gre_tun);

				if (enc_op.key) {
					NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
					return -EOPNOTSUPP;
				}
			} else {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ports(rule, &enc_ports);
			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
				return -EOPNOTSUPP;
			}

			err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
							    enc_op.key,
							    &key_layer_two,
							    &key_layer,
							    &key_size, priv,
							    tun_type, extack);
			if (err)
				return err;

			/* Ensure the ingress netdev matches the expected
			 * tun type.
			 */
			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
			return -EOPNOTSUPP;
		}
	} else if (nfp_flower_check_higher_than_mac(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
		return -EOPNOTSUPP;
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		}
	}

	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
	    nfp_flower_check_higher_than_l3(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

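/* Allocate a flow payload sized according to the computed key layout. */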
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

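/* OR the mask of every field a sub-flow's actions can rewrite into the merge
 * check, so those bits count as "set" when a merge is validated. Also record
 * the last action id and the number of output actions seen.
 */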
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			memset(&merge->ipv4, 0xff,
			       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}

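/* Copy the masked match fields of a sub-flow into the merge check structure,
 * walking the key layers recorded in the mask data.
 */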
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * or set by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}

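/* Copy any leading pre-actions (pre-tunnel/pre-LAG) to the merge action list
 * and return the number of bytes they occupy in the source list.
 */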
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
			/* fall through */
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}

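/* After a tunnel push, only output actions may follow in a merged list. */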
static int nfp_fl_verify_post_tun_acts(char *acts, int len)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];
		if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	return 0;
}

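/* Build the merged flow's action list: pre-actions first, then the remaining
 * actions of each sub-flow, dropping sub_flow1's final output action.
 */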
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1
	 * pushes a tunnel, sub_flow 2 can only have output actions for a
	 * valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
		if (err)
			return err;
	}

	/* Copy remaining actions after the pre-actions. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}

static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 in success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct flow_cls_offload merge_tc_off;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	int err;

	ASSERT_RTNL();

	/* A merge is driver-initiated, so there is no user extack to report
	 * errors against; keep extack NULL rather than reading it from the
	 * uninitialized merge_tc_off.
	 */
	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
					merge_flow->ingress_dev, extack);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_rhash;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type, extack);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
		goto err_release_metadata;
	}

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_fl_payload *origin;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list)
		nfp_flower_unlink_flow(link);

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	err = nfp_flower_xmit_flow(app, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

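/* Pull any stats the firmware reported against the merge flow and credit
 * them to every sub-flow that the merge was built from.
 */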
static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;

	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received the most recent stats updates,
	 * distribute these stats to all subflows that form the merge.
	 * The stats will then be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, priv->stats[ctx_id].used,
						 used);
	}
}

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

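/* Dispatch a flower classifier command from a block to the add/del/stats
 * handlers above.
 */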
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct flow_cls_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case FLOW_CLS_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case FLOW_CLS_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	case TC_SETUP_CLSMATCHALL:
		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
						    type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(nfp_block_cb_list);

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	repr_priv->block_shared = f->block_shared;
	f->driver_block_list = &nfp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
					  &nfp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
					       repr, repr, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_tc_block_cb,
						repr);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

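/* Per-netdev context for indirect (non-repr) block offload callbacks. */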
struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct flow_cls_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

	list_del(&priv->list);
	kfree(priv);
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct flow_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	struct flow_block_cb *block_cb;

	if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	     !nfp_flower_internal_port_can_offload(app, netdev)) ||
	    (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	     nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (cb_priv &&
		    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
					  cb_priv,
					  &nfp_block_cb_list))
			return -EBUSY;

		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
					       cb_priv, cb_priv,
					       nfp_flower_setup_indr_tc_release);
		if (IS_ERR(block_cb)) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_indr_block_cb,
						cb_priv);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __tc_indr_block_cb_register(netdev, app,
						  nfp_flower_indr_setup_tc_cb,
						  app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__tc_indr_block_cb_unregister(netdev,
					      nfp_flower_indr_setup_tc_cb, app);
	}

	return NOTIFY_OK;
}