/*
 * NOTE(review): the original file header (lines 1-33, the copyright/license
 * comment block) was lost in extraction of this chunk; restore it from the
 * upstream source file.
 */
34#include <linux/bitfield.h>
35#include <net/pkt_cls.h>
36#include <net/switchdev.h>
37#include <net/tc_act/tc_gact.h>
38#include <net/tc_act/tc_mirred.h>
39#include <net/tc_act/tc_pedit.h>
40#include <net/tc_act/tc_vlan.h>
41#include <net/tc_act/tc_tunnel_key.h>
42
43#include "cmsg.h"
44#include "main.h"
45#include "../nfp_net_repr.h"
46
47static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
48{
49 size_t act_size = sizeof(struct nfp_fl_pop_vlan);
50
51 pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
52 pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
53 pop_vlan->reserved = 0;
54}
55
56static void
57nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
58 const struct tc_action *action)
59{
60 size_t act_size = sizeof(struct nfp_fl_push_vlan);
61 u16 tmp_push_vlan_tci;
62
63 push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
64 push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
65 push_vlan->reserved = 0;
66 push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
67
68 tmp_push_vlan_tci =
69 FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
70 FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
71 NFP_FL_PUSH_VLAN_CFI;
72 push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
73}
74
/* If @action egresses to a LAG master, insert a pre-lag action at the HEAD
 * of the action list (shifting @act_len bytes of existing actions back to
 * make room) and populate it from the LAG state.
 *
 * Returns the size of the inserted action, 0 if the output device is not a
 * LAG master (nothing inserted), or a negative errno.
 */
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
	       struct nfp_fl_payload *nfp_flow, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	/* Pre_lag action must be first on the action list.
	 * Move any existing actions up to make room for it.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Multiple actions now exist; no single-action shortcut applies. */
	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}
110
111static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
112 enum nfp_flower_tun_type tun_type)
113{
114 if (!out_dev->rtnl_link_ops)
115 return false;
116
117 if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
118 return tun_type == NFP_FL_TUNNEL_VXLAN;
119
120 if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
121 return tun_type == NFP_FL_TUNNEL_GENEVE;
122
123 return false;
124}
125
/* Encode an OUTPUT action for a mirred egress action.  The port encoding
 * depends on the egress device: tunnel netdev, LAG master, or an NFP port
 * representor.  Returns 0 on success or a negative errno.
 */
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		/* Only a single tunnel output per flow is supported. */
		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0)
			return gid;
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Plain port output. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if the egress port is on the same hardware
		 * device as the ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}
187
/* Map a tunnel_key set action's destination UDP port to the firmware tunnel
 * type; geneve additionally requires firmware feature support.  Returns
 * NFP_FL_TUNNEL_NONE for unrecognised ports.
 */
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct tc_action *action)
{
	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* fall through - geneve not supported by this firmware */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}
206
/* Insert a zeroed pre-tunnel action at the HEAD of the action list, moving
 * the existing @act_len bytes of actions back to make room.  Returns a
 * pointer to the new action for the caller to complete (e.g. with the
 * tunnel IPv4 destination).
 */
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on the action list.
	 * Shift any existing actions up to make room for it.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}
227
/* Encode a SET_IPV4_TUNNEL action from a tunnel_key set action and complete
 * the matching @pre_tun action with the tunnel destination address.
 * Returns 0 on success or -EOPNOTSUPP if the action carries tunnel options
 * (e.g. geneve TLVs), which cannot be offloaded.
 */
static int
nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct tc_action *action,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;
	struct net *net;

	if (ip_tun->options_len)
		return -EOPNOTSUPP;

	net = dev_net(netdev);

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;
	/* Use the netns default TTL for the encap header. */
	set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;

	/* Complete the pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}
264
265static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
266{
267 u32 oldvalue = get_unaligned((u32 *)p_exact);
268 u32 oldmask = get_unaligned((u32 *)p_mask);
269
270 value &= mask;
271 value |= oldvalue & ~mask;
272
273 put_unaligned(oldmask | mask, (u32 *)p_mask);
274 put_unaligned(value, (u32 *)p_exact);
275}
276
/* Accumulate one pedit SET key for the ethernet header into @set_eth.
 * @off is the byte offset into the 12-byte src+dst MAC area; each key
 * covers 4 bytes.  Returns 0 on success or -EOPNOTSUPP.
 */
static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_eth *set_eth)
{
	u32 exact, mask;

	/* The 4-byte word must lie entirely within the MAC addresses. */
	if (off + 4 > ETH_ALEN * 2)
		return -EOPNOTSUPP;

	/* pedit stores the mask inverted relative to the set bits. */
	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}
301
/* Accumulate one pedit SET key for the IPv4 header into @set_ip_addr.
 * Only the source and destination address fields are settable.
 * Returns 0 on success or -EOPNOTSUPP.
 */
static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr)
{
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value. */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask = mask;
		set_ip_addr->ipv4_dst = exact;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask = mask;
		set_ip_addr->ipv4_src = exact;
		break;
	default:
		return -EOPNOTSUPP;
	}

	set_ip_addr->reserved = cpu_to_be16(0);
	set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
	set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;

	return 0;
}
334
335static void
336nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
337 struct nfp_fl_set_ipv6_addr *ip6)
338{
339 ip6->ipv6[idx % 4].mask = mask;
340 ip6->ipv6[idx % 4].exact = exact;
341
342 ip6->reserved = cpu_to_be16(0);
343 ip6->head.jump_id = opcode_tag;
344 ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
345}
346
/* Accumulate one pedit SET key for the IPv6 header, dispatching on the byte
 * offset: keys within saddr go to @ip_src, keys within daddr to @ip_dst;
 * anything outside the address fields is rejected.  Returns 0 on success or
 * -EOPNOTSUPP.
 */
static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src)
{
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value. */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	if (off < offsetof(struct ipv6hdr, saddr))
		return -EOPNOTSUPP;
	else if (off < offsetof(struct ipv6hdr, daddr))
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
				      exact, mask, ip_src);
	else if (off < offsetof(struct ipv6hdr, daddr) +
		       sizeof(struct in6_addr))
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
				      exact, mask, ip_dst);
	else
		return -EOPNOTSUPP;

	return 0;
}
375
/* Accumulate one pedit SET key for the TCP/UDP ports into @set_tport.
 * Only offset 0 (the src/dst port pair) is settable; @opcode selects the
 * TCP or UDP firmware action.  Returns 0 on success or -EOPNOTSUPP.
 */
static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode)
{
	u32 exact, mask;

	if (off)
		return -EOPNOTSUPP;

	/* pedit stores the mask inverted relative to the set bits. */
	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}
400
/* Translate a TC pedit action (one or more SET keys) into NFP set actions.
 * Keys are first accumulated into local per-header structs; the populated
 * struct(s) are then copied into @nfp_action and @a_len is advanced.
 * Returns 0 on success or -EOPNOTSUPP for unsupported commands or header
 * types.
 */
static int
nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
{
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
	enum pedit_header_type htype;
	int idx, nkeys, err;
	size_t act_size;
	u32 offset, cmd;

	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
	memset(&set_tport, 0, sizeof(set_tport));
	memset(&set_eth, 0, sizeof(set_eth));
	nkeys = tcf_pedit_nkeys(action);

	for (idx = 0; idx < nkeys; idx++) {
		cmd = tcf_pedit_cmd(action, idx);
		htype = tcf_pedit_htype(action, idx);
		offset = tcf_pedit_offset(action, idx);

		/* Only SET commands can be offloaded (no ADD). */
		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
			return -EOPNOTSUPP;

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
					     &set_ip6_src);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_TCP);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_UDP);
			break;
		default:
			return -EOPNOTSUPP;
		}
		if (err)
			return err;
	}

	/* NOTE(review): the branches below are mutually exclusive, so a
	 * pedit action mixing header types (e.g. eth + IPv4) only emits the
	 * first populated struct — confirm mixed pedits cannot reach here.
	 */
	if (set_eth.head.len_lw) {
		act_size = sizeof(set_eth);
		memcpy(nfp_action, &set_eth, act_size);
		*a_len += act_size;
	} else if (set_ip_addr.head.len_lw) {
		act_size = sizeof(set_ip_addr);
		memcpy(nfp_action, &set_ip_addr, act_size);
		*a_len += act_size;
	} else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
		/* TC supplies src and dst IPv6 sets as one action; the
		 * hardware requires two separate actions, emitted back to
		 * back here.
		 */
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
		       act_size);
		*a_len += act_size;
	} else if (set_ip6_dst.head.len_lw) {
		act_size = sizeof(set_ip6_dst);
		memcpy(nfp_action, &set_ip6_dst, act_size);
		*a_len += act_size;
	} else if (set_ip6_src.head.len_lw) {
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;
	} else if (set_tport.head.len_lw) {
		act_size = sizeof(set_tport);
		memcpy(nfp_action, &set_tport, act_size);
		*a_len += act_size;
	}

	return 0;
}
490
/* Append an OUTPUT action for mirred action @a to the flow's action list,
 * followed (when the LAG feature is active) by a head-of-list pre-lag
 * action if the egress device is a LAG master.  Advances *a_len and
 * *out_cnt.  Returns 0 on success or a negative errno.
 */
static int
nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag returns -err or the size of the pre-lag
		 * action it inserted; 0 means the egress device is not a
		 * LAG master so nothing was added.
		 */
		prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
		if (prelag_size < 0)
			return prelag_size;
		else if (prelag_size > 0 && (!last || *out_cnt))
			return -EOPNOTSUPP;

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}
529
530static int
531nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
532 struct nfp_fl_payload *nfp_fl, int *a_len,
533 struct net_device *netdev,
534 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
535 int *out_cnt)
536{
537 struct nfp_fl_set_ipv4_udp_tun *set_tun;
538 struct nfp_fl_pre_tunnel *pre_tun;
539 struct nfp_fl_push_vlan *psh_v;
540 struct nfp_fl_pop_vlan *pop_v;
541 int err;
542
543 if (is_tcf_gact_shot(a)) {
544 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
545 } else if (is_tcf_mirred_egress_redirect(a)) {
546 err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
547 true, tun_type, tun_out_cnt,
548 out_cnt);
549 if (err)
550 return err;
551
552 } else if (is_tcf_mirred_egress_mirror(a)) {
553 err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
554 false, tun_type, tun_out_cnt,
555 out_cnt);
556 if (err)
557 return err;
558
559 } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
560 if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
561 return -EOPNOTSUPP;
562
563 pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
564 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);
565
566 nfp_fl_pop_vlan(pop_v);
567 *a_len += sizeof(struct nfp_fl_pop_vlan);
568 } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
569 if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
570 return -EOPNOTSUPP;
571
572 psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
573 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
574
575 nfp_fl_push_vlan(psh_v, a);
576 *a_len += sizeof(struct nfp_fl_push_vlan);
577 } else if (is_tcf_tunnel_set(a)) {
578 struct nfp_repr *repr = netdev_priv(netdev);
579 *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
580 if (*tun_type == NFP_FL_TUNNEL_NONE)
581 return -EOPNOTSUPP;
582
583
584
585
586
587 if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
588 sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
589 return -EOPNOTSUPP;
590
591 pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
592 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
593 *a_len += sizeof(struct nfp_fl_pre_tunnel);
594
595 set_tun = (void *)&nfp_fl->action_data[*a_len];
596 err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type,
597 netdev);
598 if (err)
599 return err;
600 *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
601 } else if (is_tcf_tunnel_release(a)) {
602
603 return 0;
604 } else if (is_tcf_pedit(a)) {
605 if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
606 return -EOPNOTSUPP;
607 } else {
608
609 return -EOPNOTSUPP;
610 }
611
612 return 0;
613}
614
/* Compile the TC action list of @flow into firmware actions stored in
 * @nfp_flow->action_data, setting meta.act_len on success.  Returns 0 or
 * the first translation error.
 */
int nfp_flower_compile_action(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt;
	enum nfp_flower_tun_type tun_type;
	const struct tc_action *a;
	LIST_HEAD(actions);

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	tcf_exts_to_list(flow->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev,
					     &tun_type, &tun_out_cnt, &out_cnt);
		if (err)
			return err;
		act_cnt++;
	}

	/* The single-action shortcut set by the loop above is only valid
	 * when exactly one action was emitted; otherwise clear it so the
	 * firmware walks the full action list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}
652