linux/drivers/net/ethernet/netronome/nfp/flower/action.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/mpls.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM                      cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY                       cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT                cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS      (IP_TUNNEL_INFO_TX | \
                                                 IP_TUNNEL_INFO_IPV6)
#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS          (NFP_FL_TUNNEL_CSUM | \
                                                 NFP_FL_TUNNEL_KEY | \
                                                 NFP_FL_TUNNEL_GENEVE_OPT)

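/* Build a firmware MPLS push action from a TC mpls_push action.
 * The 32-bit label stack entry (LSE) is assembled from the label, TC,
 * BOS and TTL fields using the MPLS_LS_* shifts from linux/mpls.h;
 * e.g. label 1000, TC 0, BOS 1, TTL 64 packs to an LSE of 0x003e8140.
 */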
static int
nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
                 const struct flow_action_entry *act,
                 struct netlink_ext_ack *extack)
{
        size_t act_size = sizeof(struct nfp_fl_push_mpls);
        u32 mpls_lse = 0;

        push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
        push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        /* BOS is optional in the TC action but required for offload. */
        if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
                mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
        } else {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
                return -EOPNOTSUPP;
        }

        /* Leave MPLS TC as a default value of 0 if not explicitly set. */
        if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
                mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;

        /* Proto, label and TTL are enforced and verified for MPLS push. */
        mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
        mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
        push_mpls->ethtype = act->mpls_push.proto;
        push_mpls->lse = cpu_to_be32(mpls_lse);

        return 0;
}

static void
nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
                const struct flow_action_entry *act)
{
        size_t act_size = sizeof(struct nfp_fl_pop_mpls);

        pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
        pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        pop_mpls->ethtype = act->mpls_pop.proto;
}

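/* Build a firmware MPLS set action from a TC mpls_mangle action.
 * Only the LSE fields explicitly set by TC are added to the mask, so
 * fields left unset by the user are not modified by the firmware.
 */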
static void
nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
                const struct flow_action_entry *act)
{
        size_t act_size = sizeof(struct nfp_fl_set_mpls);
        u32 mpls_lse = 0, mpls_mask = 0;

        set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
        set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
                mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
                mpls_mask |= MPLS_LS_LABEL_MASK;
        }
        if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
                mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
                mpls_mask |= MPLS_LS_TC_MASK;
        }
        if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
                mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
                mpls_mask |= MPLS_LS_S_MASK;
        }
        if (act->mpls_mangle.ttl) {
                mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
                mpls_mask |= MPLS_LS_TTL_MASK;
        }

        set_mpls->lse = cpu_to_be32(mpls_lse);
        set_mpls->lse_mask = cpu_to_be32(mpls_mask);
}

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
        size_t act_size = sizeof(struct nfp_fl_pop_vlan);

        pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
        pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        pop_vlan->reserved = 0;
}

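/* Build a firmware VLAN push action, packing the TC priority and VLAN
 * ID into the 16-bit TCI field. Assuming NFP_FL_PUSH_VLAN_* follow the
 * standard 802.1Q TCI layout, e.g. prio 3, vid 100 yields TCI 0x6064.
 */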
static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
                 const struct flow_action_entry *act)
{
        size_t act_size = sizeof(struct nfp_fl_push_vlan);
        u16 tmp_push_vlan_tci;

        push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
        push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        push_vlan->reserved = 0;
        push_vlan->vlan_tpid = act->vlan.proto;

        tmp_push_vlan_tci =
                FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
                FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
        push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

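/* Insert a pre-LAG action at the head of the action list when the
 * egress device is a LAG master. Returns the size of the action added,
 * 0 when no LAG handling is needed, or a negative error code.
 */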
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
               struct nfp_fl_payload *nfp_flow, int act_len,
               struct netlink_ext_ack *extack)
{
        size_t act_size = sizeof(struct nfp_fl_pre_lag);
        struct nfp_fl_pre_lag *pre_lag;
        struct net_device *out_dev;
        int err;

        out_dev = act->dev;
        if (!out_dev || !netif_is_lag_master(out_dev))
                return 0;

        if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
                return -EOPNOTSUPP;
        }

        /* Pre_lag action must be first on action list.
         * If other actions already exist they need to be pushed forward.
         */
        if (act_len)
                memmove(nfp_flow->action_data + act_size,
                        nfp_flow->action_data, act_len);

        pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
        err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
        if (err)
                return err;

        pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
        pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        return act_size;
}

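/* Translate a mirred/redirect action into a firmware output action.
 * The 32-bit port field encodes a tunnel type, a LAG group id, or an
 * nfp repr port id depending on the egress device; internal ports are
 * instead recorded as the target of a pre-tunnel rule.
 */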
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
              const struct flow_action_entry *act,
              struct nfp_fl_payload *nfp_flow,
              bool last, struct net_device *in_dev,
              enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
              bool pkt_host, struct netlink_ext_ack *extack)
{
        size_t act_size = sizeof(struct nfp_fl_output);
        struct nfp_flower_priv *priv = app->priv;
        struct net_device *out_dev;
        u16 tmp_flags;

        output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
        output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        out_dev = act->dev;
        if (!out_dev) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
                return -EOPNOTSUPP;
        }

        tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

        if (tun_type) {
                /* Verify the egress netdev matches the tunnel type. */
                if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
                        return -EOPNOTSUPP;
                }

                if (*tun_out_cnt) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
                        return -EOPNOTSUPP;
                }
                (*tun_out_cnt)++;

                output->flags = cpu_to_be16(tmp_flags |
                                            NFP_FL_OUT_FLAGS_USE_TUN);
                output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        } else if (netif_is_lag_master(out_dev) &&
                   priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
                int gid;

                output->flags = cpu_to_be16(tmp_flags);
                gid = nfp_flower_lag_get_output_id(app, out_dev);
                if (gid < 0) {
                        NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
                        return gid;
                }
                output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
        } else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
                        return -EOPNOTSUPP;
                }

                if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
                        return -EOPNOTSUPP;
                }

                nfp_flow->pre_tun_rule.dev = out_dev;

                return 0;
        } else {
                /* Set action output parameters. */
                output->flags = cpu_to_be16(tmp_flags);

                if (nfp_netdev_is_nfp_repr(in_dev)) {
                        /* Confirm ingress and egress are on same device. */
                        if (!netdev_port_same_parent_id(in_dev, out_dev)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
                                return -EOPNOTSUPP;
                        }
                }

                if (!nfp_netdev_is_nfp_repr(out_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
                        return -EOPNOTSUPP;
                }

                output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
                if (!output->port) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
                        return -EOPNOTSUPP;
                }
        }
        nfp_flow->meta.shortcut = output->port;

        return 0;
}

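/* Scan the rest of the action list for the mirred/redirect entry that
 * follows a tunnel encap and report whether its egress device is a
 * gretap netdev, i.e. the encap is GRE rather than a UDP tunnel.
 */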
static bool
nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx)
{
        struct flow_action_entry *act = rule->action.entries;
        int num_act = rule->action.num_entries;
        int act_idx;

        /* Preparse action list for next mirred or redirect action */
        for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
                if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
                    act[act_idx].id == FLOW_ACTION_MIRRED)
                        return netif_is_gretap(act[act_idx].dev);

        return false;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act(struct nfp_app *app,
                        struct flow_rule *rule,
                        const struct flow_action_entry *act, int act_idx)
{
        const struct ip_tunnel_info *tun = act->tunnel;
        struct nfp_flower_priv *priv = app->priv;

        /* Determine the tunnel type based on the egress netdev
         * in the mirred action for tunnels without l4.
         */
        if (nfp_flower_tun_is_gre(rule, act_idx))
                return NFP_FL_TUNNEL_GRE;

        switch (tun->key.tp_dst) {
        case htons(IANA_VXLAN_UDP_PORT):
                return NFP_FL_TUNNEL_VXLAN;
        case htons(GENEVE_UDP_PORT):
                if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
                        return NFP_FL_TUNNEL_GENEVE;
                fallthrough;
        default:
                return NFP_FL_TUNNEL_NONE;
        }
}

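/* Insert a zeroed pre-tunnel action at the head of the action list,
 * moving any existing actions out of the way. The caller completes it
 * with the tunnel destination address via nfp_fl_set_tun().
 */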
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
        size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
        struct nfp_fl_pre_tunnel *pre_tun_act;

        /* Pre_tunnel action must be first on action list.
         * If other actions already exist they need to be pushed forward.
         */
        if (act_len)
                memmove(act_data + act_size, act_data, act_len);

        pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

        memset(pre_tun_act, 0, act_size);

        pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
        pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        return pre_tun_act;
}

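/* Append one push-geneve action per TLV option carried in the tunnel
 * key. A first pass validates the option count and total size; a
 * second pass writes the options into the action list in reverse
 * order, as the hardware expects.
 */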
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
                           const struct flow_action_entry *act,
                           struct netlink_ext_ack *extack)
{
        struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
        int opt_len, opt_cnt, act_start, tot_push_len;
        u8 *src = ip_tunnel_info_opts(ip_tun);

        /* We need to populate the options in reverse order for HW.
         * Therefore we go through the options, calculating the
         * number of options and the total size, then we populate
         * them in reverse order in the action list.
         */
        opt_cnt = 0;
        tot_push_len = 0;
        opt_len = ip_tun->options_len;
        while (opt_len > 0) {
                struct geneve_opt *opt = (struct geneve_opt *)src;

                opt_cnt++;
                if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
                        return -EOPNOTSUPP;
                }

                tot_push_len += sizeof(struct nfp_fl_push_geneve) +
                               opt->length * 4;
                if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
                        return -EOPNOTSUPP;
                }

                opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
                return -EOPNOTSUPP;
        }

        act_start = *list_len;
        *list_len += tot_push_len;
        src = ip_tunnel_info_opts(ip_tun);
        while (opt_cnt) {
                struct geneve_opt *opt = (struct geneve_opt *)src;
                struct nfp_fl_push_geneve *push;
                size_t act_size, len;

                opt_cnt--;
                act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
                tot_push_len -= act_size;
                len = act_start + tot_push_len;

                push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
                push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
                push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
                push->reserved = 0;
                push->class = opt->opt_class;
                push->type = opt->type;
                push->length = opt->length;
                memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        return 0;
}

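/* Build the firmware set-tunnel action for an encap rule. The TTL is
 * taken from the tunnel key when set, otherwise derived from a route
 * lookup on the tunnel destination, falling back to the netns default.
 * Also completes the matching pre-tunnel action with the destination
 * address.
 */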
static int
nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
               const struct flow_action_entry *act,
               struct nfp_fl_pre_tunnel *pre_tun,
               enum nfp_flower_tun_type tun_type,
               struct net_device *netdev, struct netlink_ext_ack *extack)
{
        const struct ip_tunnel_info *ip_tun = act->tunnel;
        bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
        size_t act_size = sizeof(struct nfp_fl_set_tun);
        struct nfp_flower_priv *priv = app->priv;
        u32 tmp_set_ip_tun_type_index = 0;
        /* Currently support one pre-tunnel so index is always 0. */
        int pretun_idx = 0;

        if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
                return -EOPNOTSUPP;

        if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
                return -EOPNOTSUPP;

        BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
                     NFP_FL_TUNNEL_KEY  != TUNNEL_KEY ||
                     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
        if (ip_tun->options_len &&
            (tun_type != NFP_FL_TUNNEL_GENEVE ||
            !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
                return -EOPNOTSUPP;
        }

        set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
        set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        /* Set tunnel type and pre-tunnel index. */
        tmp_set_ip_tun_type_index |=
                FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
                FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);

        set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
        set_tun->tun_id = ip_tun->key.tun_id;

        if (ip_tun->key.ttl) {
                set_tun->ttl = ip_tun->key.ttl;
#ifdef CONFIG_IPV6
        } else if (ipv6) {
                struct net *net = dev_net(netdev);
                struct flowi6 flow = {};
                struct dst_entry *dst;

                flow.daddr = ip_tun->key.u.ipv6.dst;
                flow.flowi6_proto = IPPROTO_UDP;
                dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
                if (!IS_ERR(dst)) {
                        set_tun->ttl = ip6_dst_hoplimit(dst);
                        dst_release(dst);
                } else {
                        set_tun->ttl = net->ipv6.devconf_all->hop_limit;
                }
#endif
        } else {
                struct net *net = dev_net(netdev);
                struct flowi4 flow = {};
                struct rtable *rt;
                int err;

                /* Do a route lookup to determine ttl - if fails then use
                 * default. Note that CONFIG_INET is a requirement of
                 * CONFIG_NET_SWITCHDEV so must be defined here.
                 */
                flow.daddr = ip_tun->key.u.ipv4.dst;
                flow.flowi4_proto = IPPROTO_UDP;
                rt = ip_route_output_key(net, &flow);
                err = PTR_ERR_OR_ZERO(rt);
                if (!err) {
                        set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
                        ip_rt_put(rt);
                } else {
                        set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
                }
        }

        set_tun->tos = ip_tun->key.tos;

        if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
            ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
                return -EOPNOTSUPP;
        }
        set_tun->tun_flags = ip_tun->key.tun_flags;

        if (tun_type == NFP_FL_TUNNEL_GENEVE) {
                set_tun->tun_proto = htons(ETH_P_TEB);
                set_tun->tun_len = ip_tun->options_len / 4;
        }

        /* Complete pre_tunnel action. */
        if (ipv6) {
                pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
                pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
        } else {
                pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
        }

        return 0;
}

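/* Merge a 32-bit pedit value/mask pair into the accumulated exact and
 * mask words, preserving bits already claimed by earlier pedit keys.
 */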
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
        u32 oldvalue = get_unaligned((u32 *)p_exact);
        u32 oldmask = get_unaligned((u32 *)p_mask);

        value &= mask;
        value |= oldvalue & ~mask;

        put_unaligned(oldmask | mask, (u32 *)p_mask);
        put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
        u32 exact, mask;

        if (off + 4 > ETH_ALEN * 2) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
                return -EOPNOTSUPP;
        }

        mask = ~act->mangle.mask;
        exact = act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
                return -EOPNOTSUPP;
        }

        nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
                            &set_eth->eth_addr_mask[off]);

        set_eth->reserved = cpu_to_be16(0);
        set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
        set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

        return 0;
}

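/* Overlay for the 32-bit word of the IPv4 header that starts at the
 * ttl field, used to pick apart a pedit key covering ttl, protocol
 * and checksum.
 */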
struct ipv4_ttl_word {
        __u8    ttl;
        __u8    protocol;
        __sum16 check;
};

static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_ip4_addrs *set_ip_addr,
               struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
               struct netlink_ext_ack *extack)
{
        struct ipv4_ttl_word *ttl_word_mask;
        struct ipv4_ttl_word *ttl_word;
        struct iphdr *tos_word_mask;
        struct iphdr *tos_word;
        __be32 exact, mask;

        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~act->mangle.mask;
        exact = (__force __be32)act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
                return -EOPNOTSUPP;
        }

        switch (off) {
        case offsetof(struct iphdr, daddr):
                set_ip_addr->ipv4_dst_mask |= mask;
                set_ip_addr->ipv4_dst &= ~mask;
                set_ip_addr->ipv4_dst |= exact & mask;
                set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
                set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
                                           NFP_FL_LW_SIZ;
                break;
        case offsetof(struct iphdr, saddr):
                set_ip_addr->ipv4_src_mask |= mask;
                set_ip_addr->ipv4_src &= ~mask;
                set_ip_addr->ipv4_src |= exact & mask;
                set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
                set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
                                           NFP_FL_LW_SIZ;
                break;
        case offsetof(struct iphdr, ttl):
                ttl_word_mask = (struct ipv4_ttl_word *)&mask;
                ttl_word = (struct ipv4_ttl_word *)&exact;

                if (ttl_word_mask->protocol || ttl_word_mask->check) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
                        return -EOPNOTSUPP;
                }

                set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
                set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
                set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
                set_ip_ttl_tos->head.jump_id =
                        NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
                set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
                                              NFP_FL_LW_SIZ;
                break;
        case round_down(offsetof(struct iphdr, tos), 4):
                tos_word_mask = (struct iphdr *)&mask;
                tos_word = (struct iphdr *)&exact;

                if (tos_word_mask->version || tos_word_mask->ihl ||
                    tos_word_mask->tot_len) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
                        return -EOPNOTSUPP;
                }

                set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
                set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
                set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
                set_ip_ttl_tos->head.jump_id =
                        NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
                set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
                                              NFP_FL_LW_SIZ;
                break;
        default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
                return -EOPNOTSUPP;
        }

        return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
                      struct nfp_fl_set_ipv6_addr *ip6)
{
        ip6->ipv6[word].mask |= mask;
        ip6->ipv6[word].exact &= ~mask;
        ip6->ipv6[word].exact |= exact & mask;

        ip6->reserved = cpu_to_be16(0);
        ip6->head.jump_id = opcode_tag;
        ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

struct ipv6_hop_limit_word {
        __be16 payload_len;
        u8 nexthdr;
        u8 hop_limit;
};

static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
                                    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
                                    struct netlink_ext_ack *extack)
{
        struct ipv6_hop_limit_word *fl_hl_mask;
        struct ipv6_hop_limit_word *fl_hl;

        switch (off) {
        case offsetof(struct ipv6hdr, payload_len):
                fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
                fl_hl = (struct ipv6_hop_limit_word *)&exact;

                if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
                        return -EOPNOTSUPP;
                }

                ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
                ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
                ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
                                            fl_hl_mask->hop_limit;
                break;
        case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
                if (mask & ~IPV6_FLOW_LABEL_MASK ||
                    exact & ~IPV6_FLOW_LABEL_MASK) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
                        return -EOPNOTSUPP;
                }

                ip_hl_fl->ipv6_label_mask |= mask;
                ip_hl_fl->ipv6_label &= ~mask;
                ip_hl_fl->ipv6_label |= exact & mask;
                break;
        }

        ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
        ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

        return 0;
}

static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_ipv6_addr *ip_dst,
               struct nfp_fl_set_ipv6_addr *ip_src,
               struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
               struct netlink_ext_ack *extack)
{
        __be32 exact, mask;
        int err = 0;
        u8 word;

        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~act->mangle.mask;
        exact = (__force __be32)act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
                return -EOPNOTSUPP;
        }

        if (off < offsetof(struct ipv6hdr, saddr)) {
                err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
                                                          ip_hl_fl, extack);
        } else if (off < offsetof(struct ipv6hdr, daddr)) {
                word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
                                      exact, mask, ip_src);
        } else if (off < offsetof(struct ipv6hdr, daddr) +
                       sizeof(struct in6_addr)) {
                word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
                                      exact, mask, ip_dst);
        } else {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
                return -EOPNOTSUPP;
        }

        return err;
}

static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
                 struct nfp_fl_set_tport *set_tport, int opcode,
                 struct netlink_ext_ack *extack)
{
        u32 exact, mask;

        if (off) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
                return -EOPNOTSUPP;
        }

        mask = ~act->mangle.mask;
        exact = act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
                return -EOPNOTSUPP;
        }

        nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
                            set_tport->tp_port_mask);

        set_tport->reserved = cpu_to_be16(0);
        set_tport->head.jump_id = opcode;
        set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

        return 0;
}

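/* Map the flow's L4 protocol to the TCA_CSUM_UPDATE flags that the
 * hardware recalculates as a side effect of set actions on that flow.
 */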
static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
        switch (ip_proto) {
        case 0:
                /* Filter doesn't force proto match,
                 * both TCP and UDP will be updated if encountered
                 */
                return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
        case IPPROTO_TCP:
                return TCA_CSUM_UPDATE_FLAG_TCP;
        case IPPROTO_UDP:
                return TCA_CSUM_UPDATE_FLAG_UDP;
        default:
                /* All other protocols will be ignored by FW */
                return 0;
        }
}

struct nfp_flower_pedit_acts {
        struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
        struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
        struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
        struct nfp_fl_set_ip4_addrs set_ip_addr;
        struct nfp_fl_set_tport set_tport;
        struct nfp_fl_set_eth set_eth;
};

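/* Flush the pedit actions accumulated in set_act into the action list,
 * one firmware action per modified header section, and record the
 * checksums the hardware will update as a result.
 */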
static int
nfp_fl_commit_mangle(struct flow_rule *rule, char *nfp_action,
                     int *a_len, struct nfp_flower_pedit_acts *set_act,
                     u32 *csum_updated)
{
        size_t act_size = 0;
        u8 ip_proto = 0;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ip_proto = match.key->ip_proto;
        }

        if (set_act->set_eth.head.len_lw) {
                act_size = sizeof(set_act->set_eth);
                memcpy(nfp_action, &set_act->set_eth, act_size);
                *a_len += act_size;
        }

        if (set_act->set_ip_ttl_tos.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip_ttl_tos);
                memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip_addr.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip_addr);
                memcpy(nfp_action, &set_act->set_ip_addr, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_tc_hl_fl);
                memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip6_dst.head.len_lw &&
            set_act->set_ip6_src.head.len_lw) {
                /* TC compiles set src and dst IPv6 address as a single action,
                 * the hardware requires this to be 2 separate actions.
                 */
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_src);
                memcpy(nfp_action, &set_act->set_ip6_src, act_size);
                *a_len += act_size;

                act_size = sizeof(set_act->set_ip6_dst);
                memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
                       &set_act->set_ip6_dst, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_act->set_ip6_dst.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_dst);
                memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_act->set_ip6_src.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_src);
                memcpy(nfp_action, &set_act->set_ip6_src, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }
        if (set_act->set_tport.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_tport);
                memcpy(nfp_action, &set_act->set_tport, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }

        return 0;
}

static int
nfp_fl_pedit(const struct flow_action_entry *act,
             char *nfp_action, int *a_len,
             u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
             struct netlink_ext_ack *extack)
{
        enum flow_action_mangle_base htype;
        u32 offset;

        htype = act->mangle.htype;
        offset = act->mangle.offset;

        switch (htype) {
        case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
                return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
                return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
                                      &set_act->set_ip_ttl_tos, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
                return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
                                      &set_act->set_ip6_src,
                                      &set_act->set_ip6_tc_hl_fl, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
                return nfp_fl_set_tport(act, offset, &set_act->set_tport,
                                        NFP_FL_ACTION_OPCODE_SET_TCP, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
                return nfp_fl_set_tport(act, offset, &set_act->set_tport,
                                        NFP_FL_ACTION_OPCODE_SET_UDP, extack);
        default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
                return -EOPNOTSUPP;
        }
}

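/* Validate and append a single output action. Rejects the offload if
 * earlier set actions left checksum updates that no csum action
 * consumed, and performs pre-LAG handling when LAG is enabled.
 */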
static int
nfp_flower_output_action(struct nfp_app *app,
                         const struct flow_action_entry *act,
                         struct nfp_fl_payload *nfp_fl, int *a_len,
                         struct net_device *netdev, bool last,
                         enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
                         int *out_cnt, u32 *csum_updated, bool pkt_host,
                         struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_output *output;
        int err, prelag_size;

        /* If csum_updated has not been reset by now, it means HW will
         * incorrectly update csums when they are not requested.
         */
        if (*csum_updated) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
                return -EOPNOTSUPP;
        }

        if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
                return -EOPNOTSUPP;
        }

        output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
        err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
                            tun_out_cnt, pkt_host, extack);
        if (err)
                return err;

        *a_len += sizeof(struct nfp_fl_output);

        if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
                /* nfp_fl_pre_lag returns -err or size of prelag action added.
                 * This will be 0 if it is not egressing to a lag dev.
                 */
                prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
                if (prelag_size < 0) {
                        return prelag_size;
                } else if (prelag_size > 0 && (!last || *out_cnt)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
                        return -EOPNOTSUPP;
                }

                *a_len += prelag_size;
        }
        (*out_cnt)++;

        return 0;
}

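/* Translate one TC action into its firmware representation, bounds
 * checking the action list as it grows. Pedit actions are only staged
 * in set_act here and committed separately by nfp_fl_commit_mangle().
 */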
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
                       struct flow_rule *rule,
                       struct nfp_fl_payload *nfp_fl, int *a_len,
                       struct net_device *netdev,
                       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
                       int *out_cnt, u32 *csum_updated,
                       struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
                       struct netlink_ext_ack *extack, int act_idx)
{
        struct nfp_fl_pre_tunnel *pre_tun;
        struct nfp_fl_set_tun *set_tun;
        struct nfp_fl_push_vlan *psh_v;
        struct nfp_fl_push_mpls *psh_m;
        struct nfp_fl_pop_vlan *pop_v;
        struct nfp_fl_pop_mpls *pop_m;
        struct nfp_fl_set_mpls *set_m;
        int err;

        switch (act->id) {
        case FLOW_ACTION_DROP:
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
                break;
        case FLOW_ACTION_REDIRECT_INGRESS:
        case FLOW_ACTION_REDIRECT:
                err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
                                               true, tun_type, tun_out_cnt,
                                               out_cnt, csum_updated, *pkt_host,
                                               extack);
                if (err)
                        return err;
                break;
        case FLOW_ACTION_MIRRED_INGRESS:
        case FLOW_ACTION_MIRRED:
                err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
                                               false, tun_type, tun_out_cnt,
                                               out_cnt, csum_updated, *pkt_host,
                                               extack);
                if (err)
                        return err;
                break;
        case FLOW_ACTION_VLAN_POP:
                if (*a_len +
                    sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
                        return -EOPNOTSUPP;
                }

                pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

                nfp_fl_pop_vlan(pop_v);
                *a_len += sizeof(struct nfp_fl_pop_vlan);
                break;
        case FLOW_ACTION_VLAN_PUSH:
                if (*a_len +
                    sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
                        return -EOPNOTSUPP;
                }

                psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                nfp_fl_push_vlan(psh_v, act);
                *a_len += sizeof(struct nfp_fl_push_vlan);
                break;
        case FLOW_ACTION_TUNNEL_ENCAP: {
                const struct ip_tunnel_info *ip_tun = act->tunnel;

                *tun_type = nfp_fl_get_tun_from_act(app, rule, act, act_idx);
                if (*tun_type == NFP_FL_TUNNEL_NONE) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
                        return -EOPNOTSUPP;
                }

                if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
                        return -EOPNOTSUPP;
                }

                /* Pre-tunnel action is required for tunnel encap.
                 * This checks for next hop entries on NFP.
                 * If none, the packet falls back before applying other actions.
                 */
                if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
                    sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
                        return -EOPNOTSUPP;
                }

                pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
                *a_len += sizeof(struct nfp_fl_pre_tunnel);

                err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
                if (err)
                        return err;

                set_tun = (void *)&nfp_fl->action_data[*a_len];
                err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
                                     netdev, extack);
                if (err)
                        return err;
                *a_len += sizeof(struct nfp_fl_set_tun);
                }
                break;
        case FLOW_ACTION_TUNNEL_DECAP:
                /* Tunnel decap is handled by default so accept action. */
                return 0;
        case FLOW_ACTION_MANGLE:
                if (nfp_fl_pedit(act, &nfp_fl->action_data[*a_len],
                                 a_len, csum_updated, set_act, extack))
                        return -EOPNOTSUPP;
                break;
        case FLOW_ACTION_CSUM:
                /* csum action requests recalc of something we have not fixed */
                if (act->csum_flags & ~*csum_updated) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
                        return -EOPNOTSUPP;
                }
                /* If we will correctly fix the csum we can remove it from the
                 * csum update list. Which will later be used to check support.
                 */
                *csum_updated &= ~act->csum_flags;
                break;
        case FLOW_ACTION_MPLS_PUSH:
                if (*a_len +
                    sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
                        return -EOPNOTSUPP;
                }

                psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                err = nfp_fl_push_mpls(psh_m, act, extack);
                if (err)
                        return err;
                *a_len += sizeof(struct nfp_fl_push_mpls);
                break;
        case FLOW_ACTION_MPLS_POP:
                if (*a_len +
                    sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
                        return -EOPNOTSUPP;
                }

                pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                nfp_fl_pop_mpls(pop_m, act);
                *a_len += sizeof(struct nfp_fl_pop_mpls);
                break;
        case FLOW_ACTION_MPLS_MANGLE:
                if (*a_len +
                    sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
                        return -EOPNOTSUPP;
                }

                set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                nfp_fl_set_mpls(set_m, act);
                *a_len += sizeof(struct nfp_fl_set_mpls);
                break;
        case FLOW_ACTION_PTYPE:
                /* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
                if (act->ptype != PACKET_HOST)
                        return -EOPNOTSUPP;

                *pkt_host = true;
                break;
        default:
                /* Currently we do not handle any other actions. */
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
                return -EOPNOTSUPP;
        }

        return 0;
}

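/* TC may expand one pedit into several consecutive mangle entries.
 * These helpers detect the first and last entry of such a run so the
 * accumulated result can be committed as a single batch.
 */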
static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
                                      int current_act_idx)
{
        struct flow_action_entry current_act;
        struct flow_action_entry prev_act;

        current_act = flow_act->entries[current_act_idx];
        if (current_act.id != FLOW_ACTION_MANGLE)
                return false;

        if (current_act_idx == 0)
                return true;

        prev_act = flow_act->entries[current_act_idx - 1];

        return prev_act.id != FLOW_ACTION_MANGLE;
}

static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
                                    int current_act_idx)
{
        struct flow_action_entry current_act;
        struct flow_action_entry next_act;

        current_act = flow_act->entries[current_act_idx];
        if (current_act.id != FLOW_ACTION_MANGLE)
                return false;

        /* The last valid index is num_entries - 1; comparing against
         * num_entries would read one entry past the end of the array.
         */
        if (current_act_idx == flow_act->num_entries - 1)
                return true;

        next_act = flow_act->entries[current_act_idx + 1];

        return next_act.id != FLOW_ACTION_MANGLE;
}

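/* Entry point for action offload: walk the TC action list, compile
 * each entry into nfp_flow->action_data and set the meta shortcut
 * word accordingly.
 */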
int nfp_flower_compile_action(struct nfp_app *app,
                              struct flow_rule *rule,
                              struct net_device *netdev,
                              struct nfp_fl_payload *nfp_flow,
                              struct netlink_ext_ack *extack)
{
        int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
        struct nfp_flower_pedit_acts set_act;
        enum nfp_flower_tun_type tun_type;
        struct flow_action_entry *act;
        bool pkt_host = false;
        u32 csum_updated = 0;

        if (!flow_action_hw_stats_check(&rule->action, extack,
                                        FLOW_ACTION_HW_STATS_DELAYED_BIT))
                return -EOPNOTSUPP;

        memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
        nfp_flow->meta.act_len = 0;
        tun_type = NFP_FL_TUNNEL_NONE;
        act_len = 0;
        act_cnt = 0;
        tun_out_cnt = 0;
        out_cnt = 0;

        flow_action_for_each(i, act, &rule->action) {
                if (nfp_fl_check_mangle_start(&rule->action, i))
                        memset(&set_act, 0, sizeof(set_act));
                err = nfp_flower_loop_action(app, act, rule, nfp_flow, &act_len,
                                             netdev, &tun_type, &tun_out_cnt,
                                             &out_cnt, &csum_updated,
                                             &set_act, &pkt_host, extack, i);
                if (err)
                        return err;
                act_cnt++;
                if (nfp_fl_check_mangle_end(&rule->action, i))
                        nfp_fl_commit_mangle(rule,
                                             &nfp_flow->action_data[act_len],
                                             &act_len, &set_act, &csum_updated);
        }

        /* We optimise when the action list is small; this unfortunately
         * cannot happen once there is more than one action in the list.
         */
        if (act_cnt > 1)
                nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        nfp_flow->meta.act_len = act_len;

        return 0;
}