linux/drivers/net/ethernet/netronome/nfp/flower/action.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM                      cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY                       cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT                cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS      IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS     (NFP_FL_TUNNEL_CSUM | \
                                                 NFP_FL_TUNNEL_KEY | \
                                                 NFP_FL_TUNNEL_GENEVE_OPT)

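/* Build a POP_VLAN action at the given location. The action strips the
 * outermost VLAN tag and carries no parameters beyond the common header.
 */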
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
        size_t act_size = sizeof(struct nfp_fl_pop_vlan);

        pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
        pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        pop_vlan->reserved = 0;
}

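/* Build a PUSH_VLAN action from a TC vlan push entry, e.g.
 * "tc ... action vlan push id 100 priority 2". The TPID comes from the
 * entry's protocol; the PCP and VID fields of the TCI are assembled from
 * its priority and id.
 */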
static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
                 const struct flow_action_entry *act)
{
        size_t act_size = sizeof(struct nfp_fl_push_vlan);
        u16 tmp_push_vlan_tci;

        push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
        push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        push_vlan->reserved = 0;
        push_vlan->vlan_tpid = act->vlan.proto;

        tmp_push_vlan_tci =
                FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
                FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
        push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

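/* Insert a PRE_LAG action at the head of the action list when the egress
 * device is a LAG master. Returns the size of the action added, 0 if the
 * egress device is not a LAG master, or a negative errno on failure.
 */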
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
               struct nfp_fl_payload *nfp_flow, int act_len,
               struct netlink_ext_ack *extack)
{
        size_t act_size = sizeof(struct nfp_fl_pre_lag);
        struct nfp_fl_pre_lag *pre_lag;
        struct net_device *out_dev;
        int err;

        out_dev = act->dev;
        if (!out_dev || !netif_is_lag_master(out_dev))
                return 0;

        if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
                return -EOPNOTSUPP;
        }

        /* Pre_lag action must be first on action list.
         * If other actions already exist they need to be pushed forward.
         */
        if (act_len)
                memmove(nfp_flow->action_data + act_size,
                        nfp_flow->action_data, act_len);

        pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
        err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
        if (err)
                return err;

        pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
        pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        return act_size;
}

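/* Populate an OUTPUT action for a mirred/redirect entry. Three egress
 * cases are handled: a tunnel netdev (the port encodes the tunnel type),
 * a LAG master (the port encodes the LAG group id) and a plain NFP
 * representor (the port is the representor's port id).
 */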
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
              const struct flow_action_entry *act,
              struct nfp_fl_payload *nfp_flow,
              bool last, struct net_device *in_dev,
              enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
              struct netlink_ext_ack *extack)
{
        size_t act_size = sizeof(struct nfp_fl_output);
        struct nfp_flower_priv *priv = app->priv;
        struct net_device *out_dev;
        u16 tmp_flags;

        output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
        output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        out_dev = act->dev;
        if (!out_dev) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
                return -EOPNOTSUPP;
        }

        tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

        if (tun_type) {
                /* Verify the egress netdev matches the tunnel type. */
                if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
                        return -EOPNOTSUPP;
                }

                if (*tun_out_cnt) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
                        return -EOPNOTSUPP;
                }
                (*tun_out_cnt)++;

                output->flags = cpu_to_be16(tmp_flags |
                                            NFP_FL_OUT_FLAGS_USE_TUN);
                output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        } else if (netif_is_lag_master(out_dev) &&
                   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
                int gid;

                output->flags = cpu_to_be16(tmp_flags);
                gid = nfp_flower_lag_get_output_id(app, out_dev);
                if (gid < 0) {
                        NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
                        return gid;
                }
                output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
        } else {
                /* Set action output parameters. */
                output->flags = cpu_to_be16(tmp_flags);

                if (nfp_netdev_is_nfp_repr(in_dev)) {
                        /* Confirm ingress and egress are on same device. */
                        if (!netdev_port_same_parent_id(in_dev, out_dev)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
                                return -EOPNOTSUPP;
                        }
                }

                if (!nfp_netdev_is_nfp_repr(out_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
                        return -EOPNOTSUPP;
                }

                output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
                if (!output->port) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
                        return -EOPNOTSUPP;
                }
        }
        nfp_flow->meta.shortcut = output->port;

        return 0;
}

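/* Look ahead in the action list for the mirred/redirect entry that
 * follows a tunnel encap and report whether its device is a gretap,
 * since GRE tunnels carry no L4 destination port to key off.
 */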
static bool
nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
{
        struct flow_action_entry *act = flow->rule->action.entries;
        int num_act = flow->rule->action.num_entries;
        int act_idx;

        /* Preparse action list for next mirred or redirect action */
        for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
                if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
                    act[act_idx].id == FLOW_ACTION_MIRRED)
                        return netif_is_gretap(act[act_idx].dev);

        return false;
}

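/* Map a tunnel encap entry to an NFP tunnel type: GRE is detected from
 * the egress netdev, UDP tunnels from the destination port (VXLAN on
 * 4789, GENEVE on 6081 when the firmware advertises GENEVE support).
 */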
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act(struct nfp_app *app,
                        struct flow_cls_offload *flow,
                        const struct flow_action_entry *act, int act_idx)
{
        const struct ip_tunnel_info *tun = act->tunnel;
        struct nfp_flower_priv *priv = app->priv;

        /* Determine the tunnel type based on the egress netdev
         * in the mirred action for tunnels without l4.
         */
        if (nfp_flower_tun_is_gre(flow, act_idx))
                return NFP_FL_TUNNEL_GRE;

        switch (tun->key.tp_dst) {
        case htons(IANA_VXLAN_UDP_PORT):
                return NFP_FL_TUNNEL_VXLAN;
        case htons(GENEVE_UDP_PORT):
                if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
                        return NFP_FL_TUNNEL_GENEVE;
                /* FALLTHROUGH */
        default:
                return NFP_FL_TUNNEL_NONE;
        }
}

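/* Insert a zeroed PRE_TUNNEL action at the head of the action list,
 * moving any existing actions forward to make room. The caller fills in
 * the tunnel destination address afterwards.
 */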
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
        size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
        struct nfp_fl_pre_tunnel *pre_tun_act;

        /* Pre_tunnel action must be first on action list.
         * If other actions already exist they need to be pushed forward.
         */
        if (act_len)
                memmove(act_data + act_size, act_data, act_len);

        pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

        memset(pre_tun_act, 0, act_size);

        pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
        pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        return pre_tun_act;
}

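/* Append one PUSH_GENEVE action per geneve option carried by the encap
 * entry. A first pass validates the option count and total size, a
 * second pass writes the actions back to front so they sit in reverse
 * order for HW.
 */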
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
                           const struct flow_action_entry *act,
                           struct netlink_ext_ack *extack)
{
        struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
        int opt_len, opt_cnt, act_start, tot_push_len;
        u8 *src = ip_tunnel_info_opts(ip_tun);

        /* We need to populate the options in reverse order for HW.
         * Therefore we go through the options, calculating the
         * number of options and the total size, then we populate
         * them in reverse order in the action list.
         */
        opt_cnt = 0;
        tot_push_len = 0;
        opt_len = ip_tun->options_len;
        while (opt_len > 0) {
                struct geneve_opt *opt = (struct geneve_opt *)src;

                opt_cnt++;
                if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
                        return -EOPNOTSUPP;
                }

                tot_push_len += sizeof(struct nfp_fl_push_geneve) +
                               opt->length * 4;
                if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
                        return -EOPNOTSUPP;
                }

                opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
                return -EOPNOTSUPP;
        }

        act_start = *list_len;
        *list_len += tot_push_len;
        src = ip_tunnel_info_opts(ip_tun);
        while (opt_cnt) {
                struct geneve_opt *opt = (struct geneve_opt *)src;
                struct nfp_fl_push_geneve *push;
                size_t act_size, len;

                opt_cnt--;
                act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
                tot_push_len -= act_size;
                len = act_start + tot_push_len;

                push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
                push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
                push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
                push->reserved = 0;
                push->class = opt->opt_class;
                push->type = opt->type;
                push->length = opt->length;
                memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        return 0;
}

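/* Populate a SET_IPV4_TUNNEL action from the encap entry: tunnel type
 * and pre-tunnel index, tunnel id, TTL (from the key, or from a route
 * lookup with the sysctl default as fallback), TOS and tunnel flags.
 * Also completes the paired PRE_TUNNEL action with the destination
 * address.
 */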
static int
nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
                    const struct flow_action_entry *act,
                    struct nfp_fl_pre_tunnel *pre_tun,
                    enum nfp_flower_tun_type tun_type,
                    struct net_device *netdev, struct netlink_ext_ack *extack)
{
        size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
        const struct ip_tunnel_info *ip_tun = act->tunnel;
        struct nfp_flower_priv *priv = app->priv;
        u32 tmp_set_ip_tun_type_index = 0;
        /* Currently support one pre-tunnel so index is always 0. */
        int pretun_idx = 0;

        BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
                     NFP_FL_TUNNEL_KEY  != TUNNEL_KEY ||
                     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
        if (ip_tun->options_len &&
            (tun_type != NFP_FL_TUNNEL_GENEVE ||
            !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
                return -EOPNOTSUPP;
        }

        set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
        set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        /* Set tunnel type and pre-tunnel index. */
        tmp_set_ip_tun_type_index |=
                FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
                FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

        set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
        set_tun->tun_id = ip_tun->key.tun_id;

        if (ip_tun->key.ttl) {
                set_tun->ttl = ip_tun->key.ttl;
        } else {
                struct net *net = dev_net(netdev);
                struct flowi4 flow = {};
                struct rtable *rt;
                int err;

                /* Do a route lookup to determine ttl - if it fails then
                 * use the default. Note that CONFIG_INET is a requirement
                 * of CONFIG_NET_SWITCHDEV so must be defined here.
                 */
                flow.daddr = ip_tun->key.u.ipv4.dst;
                flow.flowi4_proto = IPPROTO_UDP;
                rt = ip_route_output_key(net, &flow);
                err = PTR_ERR_OR_ZERO(rt);
                if (!err) {
                        set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
                        ip_rt_put(rt);
                } else {
                        set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
                }
        }

        set_tun->tos = ip_tun->key.tos;

        if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
            ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
                return -EOPNOTSUPP;
        }
        set_tun->tun_flags = ip_tun->key.tun_flags;

        if (tun_type == NFP_FL_TUNNEL_GENEVE) {
                set_tun->tun_proto = htons(ETH_P_TEB);
                set_tun->tun_len = ip_tun->options_len / 4;
        }

        /* Complete pre_tunnel action. */
        pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

        return 0;
}

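/* Merge a 32-bit pedit value/mask pair into an accumulated exact/mask
 * field, preserving previously set bits that this mangle does not cover.
 */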
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
        u32 oldvalue = get_unaligned((u32 *)p_exact);
        u32 oldmask = get_unaligned((u32 *)p_mask);

        value &= mask;
        value |= oldvalue & ~mask;

        put_unaligned(oldmask | mask, (u32 *)p_mask);
        put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
        u32 exact, mask;

        if (off + 4 > ETH_ALEN * 2) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
                return -EOPNOTSUPP;
        }

        mask = ~act->mangle.mask;
        exact = act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
                return -EOPNOTSUPP;
        }

        nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
                            &set_eth->eth_addr_mask[off]);

        set_eth->reserved = cpu_to_be16(0);
        set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
        set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

        return 0;
}

struct ipv4_ttl_word {
        __u8    ttl;
        __u8    protocol;
        __sum16 check;
};

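/* Apply a 32-bit IPv4 pedit to the accumulated SET_IPV4_ADDRS or
 * SET_IPV4_TTL_TOS action, depending on which header word is mangled.
 * Writes touching fields HW cannot rewrite (protocol, checksum, version,
 * ihl, tot_len) are rejected.
 */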
static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_ip4_addrs *set_ip_addr,
               struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
               struct netlink_ext_ack *extack)
{
        struct ipv4_ttl_word *ttl_word_mask;
        struct ipv4_ttl_word *ttl_word;
        struct iphdr *tos_word_mask;
        struct iphdr *tos_word;
        __be32 exact, mask;

        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~act->mangle.mask;
        exact = (__force __be32)act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
                return -EOPNOTSUPP;
        }

        switch (off) {
        case offsetof(struct iphdr, daddr):
                set_ip_addr->ipv4_dst_mask |= mask;
                set_ip_addr->ipv4_dst &= ~mask;
                set_ip_addr->ipv4_dst |= exact & mask;
                set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
                set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
                                           NFP_FL_LW_SIZ;
                break;
        case offsetof(struct iphdr, saddr):
                set_ip_addr->ipv4_src_mask |= mask;
                set_ip_addr->ipv4_src &= ~mask;
                set_ip_addr->ipv4_src |= exact & mask;
                set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
                set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
                                           NFP_FL_LW_SIZ;
                break;
        case offsetof(struct iphdr, ttl):
                ttl_word_mask = (struct ipv4_ttl_word *)&mask;
                ttl_word = (struct ipv4_ttl_word *)&exact;

                if (ttl_word_mask->protocol || ttl_word_mask->check) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
                        return -EOPNOTSUPP;
                }

                set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
                set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
                set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
                set_ip_ttl_tos->head.jump_id =
                        NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
                set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
                                              NFP_FL_LW_SIZ;
                break;
        case round_down(offsetof(struct iphdr, tos), 4):
                tos_word_mask = (struct iphdr *)&mask;
                tos_word = (struct iphdr *)&exact;

                if (tos_word_mask->version || tos_word_mask->ihl ||
                    tos_word_mask->tot_len) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
                        return -EOPNOTSUPP;
                }

                set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
                set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
                set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
                set_ip_ttl_tos->head.jump_id =
                        NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
                set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
                                              NFP_FL_LW_SIZ;
                break;
        default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
                return -EOPNOTSUPP;
        }

        return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
                      struct nfp_fl_set_ipv6_addr *ip6)
{
        ip6->ipv6[word].mask |= mask;
        ip6->ipv6[word].exact &= ~mask;
        ip6->ipv6[word].exact |= exact & mask;

        ip6->reserved = cpu_to_be16(0);
        ip6->head.jump_id = opcode_tag;
        ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

struct ipv6_hop_limit_word {
        __be16 payload_len;
        u8 nexthdr;
        u8 hop_limit;
};

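/* Handle the IPv6 pedit words preceding the addresses: the word holding
 * payload_len/nexthdr/hop_limit (only hop_limit may be changed) and the
 * word holding the flow label.
 */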
static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
                                    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
                                    struct netlink_ext_ack *extack)
{
        struct ipv6_hop_limit_word *fl_hl_mask;
        struct ipv6_hop_limit_word *fl_hl;

        switch (off) {
        case offsetof(struct ipv6hdr, payload_len):
                fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
                fl_hl = (struct ipv6_hop_limit_word *)&exact;

                if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
                        return -EOPNOTSUPP;
                }

                ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
                ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
                ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
                                            fl_hl_mask->hop_limit;
                break;
        case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
                if (mask & ~IPV6_FLOW_LABEL_MASK ||
                    exact & ~IPV6_FLOW_LABEL_MASK) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
                        return -EOPNOTSUPP;
                }

                ip_hl_fl->ipv6_label_mask |= mask;
                ip_hl_fl->ipv6_label &= ~mask;
                ip_hl_fl->ipv6_label |= exact & mask;
                break;
        }

        ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
        ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

        return 0;
}

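/* Dispatch a 32-bit IPv6 pedit to the right accumulated action: the
 * tc/hop-limit/flow-label action for words before saddr, otherwise the
 * SET_IPV6_SRC or SET_IPV6_DST action for the matching address word.
 */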
static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_ipv6_addr *ip_dst,
               struct nfp_fl_set_ipv6_addr *ip_src,
               struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
               struct netlink_ext_ack *extack)
{
        __be32 exact, mask;
        int err = 0;
        u8 word;

        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~act->mangle.mask;
        exact = (__force __be32)act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
                return -EOPNOTSUPP;
        }

        if (off < offsetof(struct ipv6hdr, saddr)) {
                err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
                                                          ip_hl_fl, extack);
        } else if (off < offsetof(struct ipv6hdr, daddr)) {
                word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
                                      exact, mask, ip_src);
        } else if (off < offsetof(struct ipv6hdr, daddr) +
                       sizeof(struct in6_addr)) {
                word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
                                      exact, mask, ip_dst);
        } else {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
                return -EOPNOTSUPP;
        }

        return err;
}

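/* Apply an L4 pedit to the accumulated SET_TCP/SET_UDP action. Only
 * offset 0 is supported, i.e. the word covering the source and
 * destination ports.
 */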
static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
                 struct nfp_fl_set_tport *set_tport, int opcode,
                 struct netlink_ext_ack *extack)
{
        u32 exact, mask;

        if (off) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
                return -EOPNOTSUPP;
        }

        mask = ~act->mangle.mask;
        exact = act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
                return -EOPNOTSUPP;
        }

        nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
                            set_tport->tp_port_mask);

        set_tport->reserved = cpu_to_be16(0);
        set_tport->head.jump_id = opcode;
        set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

        return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
        switch (ip_proto) {
        case 0:
                /* Filter doesn't force proto match,
                 * both TCP and UDP will be updated if encountered
                 */
                return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
        case IPPROTO_TCP:
                return TCA_CSUM_UPDATE_FLAG_TCP;
        case IPPROTO_UDP:
                return TCA_CSUM_UPDATE_FLAG_UDP;
        default:
                /* All other protocols will be ignored by FW */
                return 0;
        }
}

struct nfp_flower_pedit_acts {
        struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
        struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
        struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
        struct nfp_fl_set_ip4_addrs set_ip_addr;
        struct nfp_fl_set_tport set_tport;
        struct nfp_fl_set_eth set_eth;
};

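/* Flush the accumulated pedit actions into the action list in a fixed
 * order and record which checksums HW fixes up as a side effect, so that
 * a subsequent csum action can be validated against them.
 */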
static int
nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
                     int *a_len, struct nfp_flower_pedit_acts *set_act,
                     u32 *csum_updated)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        size_t act_size = 0;
        u8 ip_proto = 0;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ip_proto = match.key->ip_proto;
        }

        if (set_act->set_eth.head.len_lw) {
                act_size = sizeof(set_act->set_eth);
                memcpy(nfp_action, &set_act->set_eth, act_size);
                *a_len += act_size;
        }

        if (set_act->set_ip_ttl_tos.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip_ttl_tos);
                memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip_addr.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip_addr);
                memcpy(nfp_action, &set_act->set_ip_addr, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_tc_hl_fl);
                memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip6_dst.head.len_lw &&
            set_act->set_ip6_src.head.len_lw) {
                /* TC compiles set src and dst IPv6 address as a single action,
                 * the hardware requires this to be 2 separate actions.
                 */
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_src);
                memcpy(nfp_action, &set_act->set_ip6_src, act_size);
                *a_len += act_size;

                act_size = sizeof(set_act->set_ip6_dst);
                memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
                       &set_act->set_ip6_dst, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_act->set_ip6_dst.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_dst);
                memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_act->set_ip6_src.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_src);
                memcpy(nfp_action, &set_act->set_ip6_src, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }
        if (set_act->set_tport.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_tport);
                memcpy(nfp_action, &set_act->set_tport, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }

        return 0;
}

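/* Route a single mangle entry to the handler for the header type it
 * touches; the accumulated actions are committed once the run of mangle
 * entries ends.
 */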
static int
nfp_fl_pedit(const struct flow_action_entry *act,
             struct flow_cls_offload *flow, char *nfp_action, int *a_len,
             u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
             struct netlink_ext_ack *extack)
{
        enum flow_action_mangle_base htype;
        u32 offset;

        htype = act->mangle.htype;
        offset = act->mangle.offset;

        switch (htype) {
        case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
                return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
                return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
                                      &set_act->set_ip_ttl_tos, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
                return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
                                      &set_act->set_ip6_src,
                                      &set_act->set_ip6_tc_hl_fl, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
                return nfp_fl_set_tport(act, offset, &set_act->set_tport,
                                        NFP_FL_ACTION_OPCODE_SET_TCP, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
                return nfp_fl_set_tport(act, offset, &set_act->set_tport,
                                        NFP_FL_ACTION_OPCODE_SET_UDP, extack);
        default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
                return -EOPNOTSUPP;
        }
}

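/* Append an OUTPUT action for a mirred/redirect entry and, when the flow
 * egresses to a LAG device, prepend the required PRE_LAG action. Rejects
 * flows that still carry unsatisfied checksum-update requirements.
 */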
static int
nfp_flower_output_action(struct nfp_app *app,
                         const struct flow_action_entry *act,
                         struct nfp_fl_payload *nfp_fl, int *a_len,
                         struct net_device *netdev, bool last,
                         enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
                         int *out_cnt, u32 *csum_updated,
                         struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_output *output;
        int err, prelag_size;

        /* If csum_updated has not been reset by now, it means HW will
         * incorrectly update csums when they are not requested.
         */
        if (*csum_updated) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
                return -EOPNOTSUPP;
        }

        if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
                return -EOPNOTSUPP;
        }

        output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
        err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
                            tun_out_cnt, extack);
        if (err)
                return err;

        *a_len += sizeof(struct nfp_fl_output);

        if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
                /* nfp_fl_pre_lag returns -err or size of prelag action added.
                 * This will be 0 if it is not egressing to a lag dev.
                 */
                prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
                if (prelag_size < 0) {
                        return prelag_size;
                } else if (prelag_size > 0 && (!last || *out_cnt)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
                        return -EOPNOTSUPP;
                }

                *a_len += prelag_size;
        }
        (*out_cnt)++;

        return 0;
}

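/* Translate one TC action entry into zero or more NFP actions appended
 * to nfp_fl->action_data, growing *a_len accordingly.
 */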
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
                       struct flow_cls_offload *flow,
                       struct nfp_fl_payload *nfp_fl, int *a_len,
                       struct net_device *netdev,
                       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
                       int *out_cnt, u32 *csum_updated,
                       struct nfp_flower_pedit_acts *set_act,
                       struct netlink_ext_ack *extack, int act_idx)
{
        struct nfp_fl_set_ipv4_tun *set_tun;
        struct nfp_fl_pre_tunnel *pre_tun;
        struct nfp_fl_push_vlan *psh_v;
        struct nfp_fl_pop_vlan *pop_v;
        int err;

        switch (act->id) {
        case FLOW_ACTION_DROP:
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
                break;
        case FLOW_ACTION_REDIRECT:
                err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
                                               true, tun_type, tun_out_cnt,
                                               out_cnt, csum_updated, extack);
                if (err)
                        return err;
                break;
        case FLOW_ACTION_MIRRED:
                err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
                                               false, tun_type, tun_out_cnt,
                                               out_cnt, csum_updated, extack);
                if (err)
                        return err;
                break;
        case FLOW_ACTION_VLAN_POP:
                if (*a_len +
                    sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
                        return -EOPNOTSUPP;
                }

                pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

                nfp_fl_pop_vlan(pop_v);
                *a_len += sizeof(struct nfp_fl_pop_vlan);
                break;
        case FLOW_ACTION_VLAN_PUSH:
                if (*a_len +
                    sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
                        return -EOPNOTSUPP;
                }

                psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                nfp_fl_push_vlan(psh_v, act);
                *a_len += sizeof(struct nfp_fl_push_vlan);
                break;
        case FLOW_ACTION_TUNNEL_ENCAP: {
                const struct ip_tunnel_info *ip_tun = act->tunnel;

                *tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
                if (*tun_type == NFP_FL_TUNNEL_NONE) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
                        return -EOPNOTSUPP;
                }

                if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
                        return -EOPNOTSUPP;
                }

                /* Pre-tunnel action is required for tunnel encap.
                 * This checks for next hop entries on NFP.
                 * If none, the packet falls back before applying other actions.
                 */
                if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
                    sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
                        return -EOPNOTSUPP;
                }

                pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
                *a_len += sizeof(struct nfp_fl_pre_tunnel);

                err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
                if (err)
                        return err;

                set_tun = (void *)&nfp_fl->action_data[*a_len];
                err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
                                          *tun_type, netdev, extack);
                if (err)
                        return err;
                *a_len += sizeof(struct nfp_fl_set_ipv4_tun);
                }
                break;
        case FLOW_ACTION_TUNNEL_DECAP:
                /* Tunnel decap is handled by default so accept action. */
                return 0;
        case FLOW_ACTION_MANGLE:
                if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
                                 a_len, csum_updated, set_act, extack))
                        return -EOPNOTSUPP;
                break;
        case FLOW_ACTION_CSUM:
                /* csum action requests recalc of something we have not fixed */
                if (act->csum_flags & ~*csum_updated) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
                        return -EOPNOTSUPP;
                }
                /* If we will correctly fix the csum we can remove it from
                 * the csum update list, which will later be used to check
                 * support.
                 */
                *csum_updated &= ~act->csum_flags;
                break;
        default:
                /* Currently we do not handle any other actions. */
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
                return -EOPNOTSUPP;
        }

        return 0;
}

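/* Consecutive mangle entries are accumulated into a single pedit batch;
 * these helpers detect the first and last entry of such a run.
 */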
static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
                                      int current_act_idx)
{
        struct flow_action_entry current_act;
        struct flow_action_entry prev_act;

        current_act = flow_act->entries[current_act_idx];
        if (current_act.id != FLOW_ACTION_MANGLE)
                return false;

        if (current_act_idx == 0)
                return true;

        prev_act = flow_act->entries[current_act_idx - 1];

        return prev_act.id != FLOW_ACTION_MANGLE;
}

static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
                                    int current_act_idx)
{
        struct flow_action_entry current_act;
        struct flow_action_entry next_act;

        current_act = flow_act->entries[current_act_idx];
        if (current_act.id != FLOW_ACTION_MANGLE)
                return false;

        /* The last entry ends the run; guard against reading past the
         * entries array.
         */
        if (current_act_idx == flow_act->num_entries - 1)
                return true;

        next_act = flow_act->entries[current_act_idx + 1];

        return next_act.id != FLOW_ACTION_MANGLE;
}

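/* Entry point for action compilation: walk the TC action list, batching
 * consecutive mangle entries so they are committed as one set of pedit
 * actions, and populate nfp_flow->action_data and its metadata. A rule
 * such as "... flower action pedit ex munge ip ttl set 64 pipe action
 * mirred egress redirect dev <repr>" thus compiles to one pedit batch
 * followed by an OUTPUT action.
 */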
int nfp_flower_compile_action(struct nfp_app *app,
                              struct flow_cls_offload *flow,
                              struct net_device *netdev,
                              struct nfp_fl_payload *nfp_flow,
                              struct netlink_ext_ack *extack)
{
        int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
        struct nfp_flower_pedit_acts set_act;
        enum nfp_flower_tun_type tun_type;
        struct flow_action_entry *act;
        u32 csum_updated = 0;

        memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
        nfp_flow->meta.act_len = 0;
        tun_type = NFP_FL_TUNNEL_NONE;
        act_len = 0;
        act_cnt = 0;
        tun_out_cnt = 0;
        out_cnt = 0;

        flow_action_for_each(i, act, &flow->rule->action) {
                if (nfp_fl_check_mangle_start(&flow->rule->action, i))
                        memset(&set_act, 0, sizeof(set_act));
                err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
                                             netdev, &tun_type, &tun_out_cnt,
                                             &out_cnt, &csum_updated,
                                             &set_act, extack, i);
                if (err)
                        return err;
                act_cnt++;
                if (nfp_fl_check_mangle_end(&flow->rule->action, i))
                        nfp_fl_commit_mangle(flow,
                                             &nfp_flow->action_data[act_len],
                                             &act_len, &set_act, &csum_updated);
        }

        /* We optimise with a shortcut when the action list is small;
         * unfortunately this cannot be done once there is more than one
         * action in the list.
         */
        if (act_cnt > 1)
                nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        nfp_flow->meta.act_len = act_len;

        return 0;
}