linux/drivers/net/ethernet/netronome/nfp/flower/action.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM                      cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY                       cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT                cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS      IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS     (NFP_FL_TUNNEL_CSUM | \
                                                 NFP_FL_TUNNEL_KEY | \
                                                 NFP_FL_TUNNEL_GENEVE_OPT)
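
/* Illustrative cross-check (assumption: values from include/net/ip_tunnels.h):
 * TUNNEL_CSUM is cpu_to_be16(0x01), TUNNEL_KEY is cpu_to_be16(0x04) and
 * TUNNEL_GENEVE_OPT is cpu_to_be16(0x0800) today; the BUILD_BUG_ON() in
 * nfp_fl_set_ipv4_udp_tun() below fails the build if the kernel values ever
 * drift from the FW constants above.
 */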

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
        size_t act_size = sizeof(struct nfp_fl_pop_vlan);

        pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
        pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
                 const struct flow_action_entry *act)
{
        size_t act_size = sizeof(struct nfp_fl_push_vlan);
        u16 tmp_push_vlan_tci;

        push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
        push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        push_vlan->reserved = 0;
        push_vlan->vlan_tpid = act->vlan.proto;

        tmp_push_vlan_tci =
                FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
                FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
        push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
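
/* Worked example (illustrative, assuming the cmsg.h masks place priority in
 * TCI bits 15:13 and VID in bits 11:0):
 *   act->vlan.prio = 5, act->vlan.vid = 100
 *   tmp_push_vlan_tci = (5 << 13) | 100 = 0xa064
 */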

static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
               struct nfp_fl_payload *nfp_flow, int act_len)
{
        size_t act_size = sizeof(struct nfp_fl_pre_lag);
        struct nfp_fl_pre_lag *pre_lag;
        struct net_device *out_dev;
        int err;

        out_dev = act->dev;
        if (!out_dev || !netif_is_lag_master(out_dev))
                return 0;

        if (act_len + act_size > NFP_FL_MAX_A_SIZ)
                return -EOPNOTSUPP;

        /* Pre_lag action must be first on the action list.
         * If other actions already exist they need to be pushed forward.
         */
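        /* Layout sketch (illustrative sizes):
         *   before: | act0 | act1 | ...
         *   after:  | pre_lag | act0 | act1 | ...
         */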
        if (act_len)
                memmove(nfp_flow->action_data + act_size,
                        nfp_flow->action_data, act_len);

        pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
        err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
        if (err)
                return err;

        pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
        pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        return act_size;
}

static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
              const struct flow_action_entry *act,
              struct nfp_fl_payload *nfp_flow,
              bool last, struct net_device *in_dev,
              enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
        size_t act_size = sizeof(struct nfp_fl_output);
        struct nfp_flower_priv *priv = app->priv;
        struct net_device *out_dev;
        u16 tmp_flags;

        output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
        output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        out_dev = act->dev;
        if (!out_dev)
                return -EOPNOTSUPP;

        tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

        if (tun_type) {
                /* Verify the egress netdev matches the tunnel type. */
                if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
                        return -EOPNOTSUPP;

                if (*tun_out_cnt)
                        return -EOPNOTSUPP;
                (*tun_out_cnt)++;

                output->flags = cpu_to_be16(tmp_flags |
                                            NFP_FL_OUT_FLAGS_USE_TUN);
                output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        } else if (netif_is_lag_master(out_dev) &&
                   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
                int gid;

                output->flags = cpu_to_be16(tmp_flags);
                gid = nfp_flower_lag_get_output_id(app, out_dev);
                if (gid < 0)
                        return gid;
                output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
        } else {
                /* Set action output parameters. */
                output->flags = cpu_to_be16(tmp_flags);

                if (nfp_netdev_is_nfp_repr(in_dev)) {
                        /* Confirm ingress and egress are on same device. */
                        if (!netdev_port_same_parent_id(in_dev, out_dev))
                                return -EOPNOTSUPP;
                }

                if (!nfp_netdev_is_nfp_repr(out_dev))
                        return -EOPNOTSUPP;

                output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
                if (!output->port)
                        return -EOPNOTSUPP;
        }
        nfp_flow->meta.shortcut = output->port;

        return 0;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
                                const struct flow_action_entry *act)
{
        const struct ip_tunnel_info *tun = act->tunnel;
        struct nfp_flower_priv *priv = app->priv;

        switch (tun->key.tp_dst) {
        case htons(IANA_VXLAN_UDP_PORT):
                return NFP_FL_TUNNEL_VXLAN;
        case htons(GENEVE_UDP_PORT):
                if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
                        return NFP_FL_TUNNEL_GENEVE;
                /* FALLTHROUGH */
        default:
                return NFP_FL_TUNNEL_NONE;
        }
}
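
/* IANA_VXLAN_UDP_PORT is 4789 and GENEVE_UDP_PORT is 6081; an encap action
 * keyed to any other destination port maps to NFP_FL_TUNNEL_NONE and is
 * rejected by the caller.
 */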

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
        size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
        struct nfp_fl_pre_tunnel *pre_tun_act;

        /* Pre_tunnel action must be first on action list.
         * If other actions already exist they need to be pushed forward.
         */
        if (act_len)
                memmove(act_data + act_size, act_data, act_len);

        pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

        memset(pre_tun_act, 0, act_size);

        pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
        pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        return pre_tun_act;
}

static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
                           const struct flow_action_entry *act)
{
        struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
        int opt_len, opt_cnt, act_start, tot_push_len;
        u8 *src = ip_tunnel_info_opts(ip_tun);

        /* We need to populate the options in reverse order for HW.
         * Therefore we go through the options, calculating the
         * number of options and the total size, then we populate
         * them in reverse order in the action list.
         */
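        /* Sketch for two TLVs A then B in ip_tun->options (illustrative):
         *   action_data[act_start]          = push_geneve(B)
         *   action_data[act_start + len(B)] = push_geneve(A)
         * i.e. the first option in the TLV list lands last in the action
         * list.
         */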
        opt_cnt = 0;
        tot_push_len = 0;
        opt_len = ip_tun->options_len;
        while (opt_len > 0) {
                struct geneve_opt *opt = (struct geneve_opt *)src;

                opt_cnt++;
                if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
                        return -EOPNOTSUPP;

                tot_push_len += sizeof(struct nfp_fl_push_geneve) +
                               opt->length * 4;
                if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
                        return -EOPNOTSUPP;

                opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
                return -EOPNOTSUPP;

        act_start = *list_len;
        *list_len += tot_push_len;
        src = ip_tunnel_info_opts(ip_tun);
        while (opt_cnt) {
                struct geneve_opt *opt = (struct geneve_opt *)src;
                struct nfp_fl_push_geneve *push;
                size_t act_size, len;

                opt_cnt--;
                act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
                tot_push_len -= act_size;
                len = act_start + tot_push_len;

                push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
                push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
                push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
                push->reserved = 0;
                push->class = opt->opt_class;
                push->type = opt->type;
                push->length = opt->length;
                memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        return 0;
}

static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
                        struct nfp_fl_set_ipv4_udp_tun *set_tun,
                        const struct flow_action_entry *act,
                        struct nfp_fl_pre_tunnel *pre_tun,
                        enum nfp_flower_tun_type tun_type,
                        struct net_device *netdev)
{
        size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
        const struct ip_tunnel_info *ip_tun = act->tunnel;
        struct nfp_flower_priv *priv = app->priv;
        u32 tmp_set_ip_tun_type_index = 0;
        /* Currently support one pre-tunnel so index is always 0. */
        int pretun_idx = 0;

        BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
                     NFP_FL_TUNNEL_KEY  != TUNNEL_KEY ||
                     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
        if (ip_tun->options_len &&
            (tun_type != NFP_FL_TUNNEL_GENEVE ||
            !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
                return -EOPNOTSUPP;

        set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
        set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        /* Set tunnel type and pre-tunnel index. */
        tmp_set_ip_tun_type_index |=
                FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
                FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

        set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
        set_tun->tun_id = ip_tun->key.tun_id;

        if (ip_tun->key.ttl) {
                set_tun->ttl = ip_tun->key.ttl;
        } else {
                struct net *net = dev_net(netdev);
                struct flowi4 flow = {};
                struct rtable *rt;
                int err;

                /* Do a route lookup to determine ttl - if it fails, use the
                 * default. Note that CONFIG_INET is a requirement of
                 * CONFIG_NET_SWITCHDEV so it must be defined here.
                 */
                flow.daddr = ip_tun->key.u.ipv4.dst;
                flow.flowi4_proto = IPPROTO_UDP;
                rt = ip_route_output_key(net, &flow);
                err = PTR_ERR_OR_ZERO(rt);
                if (!err) {
                        set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
                        ip_rt_put(rt);
                } else {
                        set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
                }
        }

        set_tun->tos = ip_tun->key.tos;

        if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
            ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
                return -EOPNOTSUPP;
        set_tun->tun_flags = ip_tun->key.tun_flags;

        if (tun_type == NFP_FL_TUNNEL_GENEVE) {
                set_tun->tun_proto = htons(ETH_P_TEB);
                set_tun->tun_len = ip_tun->options_len / 4;
        }

        /* Complete pre_tunnel action. */
        pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

        return 0;
}

static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
        u32 oldvalue = get_unaligned((u32 *)p_exact);
        u32 oldmask = get_unaligned((u32 *)p_mask);

        value &= mask;
        value |= oldvalue & ~mask;

        put_unaligned(oldmask | mask, (u32 *)p_mask);
        put_unaligned(value, (u32 *)p_exact);
}
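
/* Example (illustrative values): with *p_exact = 0x11223344 and
 * *p_mask = 0x0000ffff, a call with value = 0xaabbccdd, mask = 0xffff0000
 * leaves *p_exact = 0xaabb3344 and *p_mask = 0xffffffff.
 */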

static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_eth *set_eth)
{
        u32 exact, mask;

        if (off + 4 > ETH_ALEN * 2)
                return -EOPNOTSUPP;

        mask = ~act->mangle.mask;
        exact = act->mangle.val;

        if (exact & ~mask)
                return -EOPNOTSUPP;

        nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
                            &set_eth->eth_addr_mask[off]);

        set_eth->reserved = cpu_to_be16(0);
        set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
        set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

        return 0;
}

struct ipv4_ttl_word {
        __u8    ttl;
        __u8    protocol;
        __sum16 check;
};
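
/* Overlays the 32-bit word at iphdr offset 8 (ttl, protocol, check), which
 * is the word pedit delivers for a TTL rewrite.
 */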

static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_ip4_addrs *set_ip_addr,
               struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
{
        struct ipv4_ttl_word *ttl_word_mask;
        struct ipv4_ttl_word *ttl_word;
        struct iphdr *tos_word_mask;
        struct iphdr *tos_word;
        __be32 exact, mask;

        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~act->mangle.mask;
        exact = (__force __be32)act->mangle.val;

        if (exact & ~mask)
                return -EOPNOTSUPP;

        switch (off) {
        case offsetof(struct iphdr, daddr):
                set_ip_addr->ipv4_dst_mask |= mask;
                set_ip_addr->ipv4_dst &= ~mask;
                set_ip_addr->ipv4_dst |= exact & mask;
                set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
                set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
                                           NFP_FL_LW_SIZ;
                break;
        case offsetof(struct iphdr, saddr):
                set_ip_addr->ipv4_src_mask |= mask;
                set_ip_addr->ipv4_src &= ~mask;
                set_ip_addr->ipv4_src |= exact & mask;
                set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
                set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
                                           NFP_FL_LW_SIZ;
                break;
        case offsetof(struct iphdr, ttl):
                ttl_word_mask = (struct ipv4_ttl_word *)&mask;
                ttl_word = (struct ipv4_ttl_word *)&exact;

                if (ttl_word_mask->protocol || ttl_word_mask->check)
                        return -EOPNOTSUPP;

                set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
                set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
                set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
                set_ip_ttl_tos->head.jump_id =
                        NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
                set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
                                              NFP_FL_LW_SIZ;
                break;
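        /* tos sits at byte offset 1 in the IPv4 header; pedit mangles
         * 32-bit words, so the offset it reports is the word-aligned
         * round_down(1, 4) == 0.
         */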
        case round_down(offsetof(struct iphdr, tos), 4):
                tos_word_mask = (struct iphdr *)&mask;
                tos_word = (struct iphdr *)&exact;

                if (tos_word_mask->version || tos_word_mask->ihl ||
                    tos_word_mask->tot_len)
                        return -EOPNOTSUPP;

                set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
                set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
                set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
                set_ip_ttl_tos->head.jump_id =
                        NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
                set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
                                              NFP_FL_LW_SIZ;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
                      struct nfp_fl_set_ipv6_addr *ip6)
{
        ip6->ipv6[word].mask |= mask;
        ip6->ipv6[word].exact &= ~mask;
        ip6->ipv6[word].exact |= exact & mask;

        ip6->reserved = cpu_to_be16(0);
        ip6->head.jump_id = opcode_tag;
        ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

struct ipv6_hop_limit_word {
        __be16 payload_len;
        u8 nexthdr;
        u8 hop_limit;
};
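
/* Overlays the 32-bit word at ipv6hdr offset 4 (payload_len, nexthdr,
 * hop_limit), which is the word pedit delivers for a hop-limit rewrite.
 */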

static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
                                    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
{
        struct ipv6_hop_limit_word *fl_hl_mask;
        struct ipv6_hop_limit_word *fl_hl;

        switch (off) {
        case offsetof(struct ipv6hdr, payload_len):
                fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
                fl_hl = (struct ipv6_hop_limit_word *)&exact;

                if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
                        return -EOPNOTSUPP;

                ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
                ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
                ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
                                            fl_hl_mask->hop_limit;
                break;
        case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
                if (mask & ~IPV6_FLOW_LABEL_MASK ||
                    exact & ~IPV6_FLOW_LABEL_MASK)
                        return -EOPNOTSUPP;

                ip_hl_fl->ipv6_label_mask |= mask;
                ip_hl_fl->ipv6_label &= ~mask;
                ip_hl_fl->ipv6_label |= exact & mask;
                break;
        }

        ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
        ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

        return 0;
}

static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_ipv6_addr *ip_dst,
               struct nfp_fl_set_ipv6_addr *ip_src,
               struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
{
        __be32 exact, mask;
        int err = 0;
        u8 word;

        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~act->mangle.mask;
        exact = (__force __be32)act->mangle.val;

        if (exact & ~mask)
                return -EOPNOTSUPP;

        if (off < offsetof(struct ipv6hdr, saddr)) {
                err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
                                                          ip_hl_fl);
        } else if (off < offsetof(struct ipv6hdr, daddr)) {
                word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
                                      exact, mask, ip_src);
        } else if (off < offsetof(struct ipv6hdr, daddr) +
                       sizeof(struct in6_addr)) {
                word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
                                      exact, mask, ip_dst);
        } else {
                return -EOPNOTSUPP;
        }

        return err;
}

static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
                 struct nfp_fl_set_tport *set_tport, int opcode)
{
        u32 exact, mask;

        if (off)
                return -EOPNOTSUPP;

        mask = ~act->mangle.mask;
        exact = act->mangle.val;

        if (exact & ~mask)
                return -EOPNOTSUPP;

        nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
                            set_tport->tp_port_mask);

        set_tport->reserved = cpu_to_be16(0);
        set_tport->head.jump_id = opcode;
        set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

        return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
        switch (ip_proto) {
        case 0:
                /* Filter doesn't force a proto match, so both TCP and
                 * UDP will be updated if encountered.
                 */
                return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
        case IPPROTO_TCP:
                return TCA_CSUM_UPDATE_FLAG_TCP;
        case IPPROTO_UDP:
                return TCA_CSUM_UPDATE_FLAG_UDP;
        default:
                /* All other protocols will be ignored by FW */
                return 0;
        }
}

struct nfp_flower_pedit_acts {
        struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
        struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
        struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
        struct nfp_fl_set_ip4_addrs set_ip_addr;
        struct nfp_fl_set_tport set_tport;
        struct nfp_fl_set_eth set_eth;
};

static int
nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
                     int *a_len, struct nfp_flower_pedit_acts *set_act,
                     u32 *csum_updated)
{
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
        size_t act_size = 0;
        u8 ip_proto = 0;

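        /* Pedit sub-actions are committed in the fixed order below (eth,
         * ip4 ttl/tos, ip4 addrs, ip6 tc/hl/fl, ip6 dst/src, tport),
         * regardless of the order the mangle keys arrived in.
         */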
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ip_proto = match.key->ip_proto;
        }

        if (set_act->set_eth.head.len_lw) {
                act_size = sizeof(set_act->set_eth);
                memcpy(nfp_action, &set_act->set_eth, act_size);
                *a_len += act_size;
        }

        if (set_act->set_ip_ttl_tos.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip_ttl_tos);
                memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip_addr.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip_addr);
                memcpy(nfp_action, &set_act->set_ip_addr, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_tc_hl_fl);
                memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip6_dst.head.len_lw &&
            set_act->set_ip6_src.head.len_lw) {
                /* TC compiles set src and dst IPv6 address as a single
                 * action, but the hardware requires this to be 2 separate
                 * actions.
                 */
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_src);
                memcpy(nfp_action, &set_act->set_ip6_src, act_size);
                *a_len += act_size;

                act_size = sizeof(set_act->set_ip6_dst);
                memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
                       &set_act->set_ip6_dst, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_act->set_ip6_dst.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_dst);
                memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_act->set_ip6_src.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_src);
                memcpy(nfp_action, &set_act->set_ip6_src, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }
        if (set_act->set_tport.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_tport);
                memcpy(nfp_action, &set_act->set_tport, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }

        return 0;
}

static int
nfp_fl_pedit(const struct flow_action_entry *act,
             struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
             u32 *csum_updated, struct nfp_flower_pedit_acts *set_act)
{
        enum flow_action_mangle_base htype;
        u32 offset;

        htype = act->mangle.htype;
        offset = act->mangle.offset;

        switch (htype) {
        case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
                return nfp_fl_set_eth(act, offset, &set_act->set_eth);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
                return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
                                      &set_act->set_ip_ttl_tos);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
                return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
                                      &set_act->set_ip6_src,
                                      &set_act->set_ip6_tc_hl_fl);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
                return nfp_fl_set_tport(act, offset, &set_act->set_tport,
                                        NFP_FL_ACTION_OPCODE_SET_TCP);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
                return nfp_fl_set_tport(act, offset, &set_act->set_tport,
                                        NFP_FL_ACTION_OPCODE_SET_UDP);
        default:
                return -EOPNOTSUPP;
        }
}

static int
nfp_flower_output_action(struct nfp_app *app,
                         const struct flow_action_entry *act,
                         struct nfp_fl_payload *nfp_fl, int *a_len,
                         struct net_device *netdev, bool last,
                         enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
                         int *out_cnt, u32 *csum_updated)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_output *output;
        int err, prelag_size;

        /* If csum_updated has not been reset by now, it means HW will
         * incorrectly update csums when they are not requested.
         */
        if (*csum_updated)
                return -EOPNOTSUPP;

        if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
                return -EOPNOTSUPP;

        output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
        err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
                            tun_out_cnt);
        if (err)
                return err;

        *a_len += sizeof(struct nfp_fl_output);

        if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
                /* nfp_fl_pre_lag returns -err or size of prelag action added.
                 * This will be 0 if it is not egressing to a lag dev.
                 */
                prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len);
                if (prelag_size < 0)
                        return prelag_size;
                else if (prelag_size > 0 && (!last || *out_cnt))
                        return -EOPNOTSUPP;

                *a_len += prelag_size;
        }
        (*out_cnt)++;

        return 0;
}

static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
                       struct tc_cls_flower_offload *flow,
                       struct nfp_fl_payload *nfp_fl, int *a_len,
                       struct net_device *netdev,
                       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
                       int *out_cnt, u32 *csum_updated,
                       struct nfp_flower_pedit_acts *set_act)
{
        struct nfp_fl_set_ipv4_udp_tun *set_tun;
        struct nfp_fl_pre_tunnel *pre_tun;
        struct nfp_fl_push_vlan *psh_v;
        struct nfp_fl_pop_vlan *pop_v;
        int err;

        switch (act->id) {
        case FLOW_ACTION_DROP:
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
                break;
        case FLOW_ACTION_REDIRECT:
                err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
                                               true, tun_type, tun_out_cnt,
                                               out_cnt, csum_updated);
                if (err)
                        return err;
                break;
        case FLOW_ACTION_MIRRED:
                err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
                                               false, tun_type, tun_out_cnt,
                                               out_cnt, csum_updated);
                if (err)
                        return err;
                break;
        case FLOW_ACTION_VLAN_POP:
                if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

                nfp_fl_pop_vlan(pop_v);
                *a_len += sizeof(struct nfp_fl_pop_vlan);
                break;
        case FLOW_ACTION_VLAN_PUSH:
                if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                nfp_fl_push_vlan(psh_v, act);
                *a_len += sizeof(struct nfp_fl_push_vlan);
                break;
        case FLOW_ACTION_TUNNEL_ENCAP: {
                const struct ip_tunnel_info *ip_tun = act->tunnel;

                *tun_type = nfp_fl_get_tun_from_act_l4_port(app, act);
                if (*tun_type == NFP_FL_TUNNEL_NONE)
                        return -EOPNOTSUPP;

                if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
                        return -EOPNOTSUPP;

                /* Pre-tunnel action is required for tunnel encap.
                 * This checks for next hop entries on NFP.
                 * If none, the packet falls back before applying other actions.
                 */
                if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
                    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
                *a_len += sizeof(struct nfp_fl_pre_tunnel);

                err = nfp_fl_push_geneve_options(nfp_fl, a_len, act);
                if (err)
                        return err;

                set_tun = (void *)&nfp_fl->action_data[*a_len];
                err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun,
                                              *tun_type, netdev);
                if (err)
                        return err;
                *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
                }
                break;
        case FLOW_ACTION_TUNNEL_DECAP:
                /* Tunnel decap is handled by default so accept action. */
                return 0;
        case FLOW_ACTION_MANGLE:
                if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
                                 a_len, csum_updated, set_act))
                        return -EOPNOTSUPP;
                break;
        case FLOW_ACTION_CSUM:
                /* csum action requests recalc of something we have not fixed */
                if (act->csum_flags & ~*csum_updated)
                        return -EOPNOTSUPP;
                /* If we will correctly fix the csum we can remove it from
                 * the csum update list, which will later be used to check
                 * support.
                 */
                *csum_updated &= ~act->csum_flags;
                break;
        default:
                /* Currently we do not handle any other actions. */
                return -EOPNOTSUPP;
        }

        return 0;
}

static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
                                      int current_act_idx)
{
        struct flow_action_entry current_act;
        struct flow_action_entry prev_act;

        current_act = flow_act->entries[current_act_idx];
        if (current_act.id != FLOW_ACTION_MANGLE)
                return false;

        if (current_act_idx == 0)
                return true;

        prev_act = flow_act->entries[current_act_idx - 1];

        return prev_act.id != FLOW_ACTION_MANGLE;
}

static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
                                    int current_act_idx)
{
        struct flow_action_entry current_act;
        struct flow_action_entry next_act;

        current_act = flow_act->entries[current_act_idx];
        if (current_act.id != FLOW_ACTION_MANGLE)
                return false;

        /* Last action in the list: there is no next entry to inspect. */
        if (current_act_idx == flow_act->num_entries - 1)
                return true;

        next_act = flow_act->entries[current_act_idx + 1];

        return next_act.id != FLOW_ACTION_MANGLE;
}
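
/* Illustrative grouping: for the action list [mangle eth, mangle ip4,
 * output], mangle_start fires at index 0 (zero the scratch set_act),
 * mangle_end fires at index 1 (commit both pedits in one run) and the
 * output at index 2 is handled on its own.
 */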

int nfp_flower_compile_action(struct nfp_app *app,
                              struct tc_cls_flower_offload *flow,
                              struct net_device *netdev,
                              struct nfp_fl_payload *nfp_flow)
{
        int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
        struct nfp_flower_pedit_acts set_act;
        enum nfp_flower_tun_type tun_type;
        struct flow_action_entry *act;
        u32 csum_updated = 0;

        memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
        nfp_flow->meta.act_len = 0;
        tun_type = NFP_FL_TUNNEL_NONE;
        act_len = 0;
        act_cnt = 0;
        tun_out_cnt = 0;
        out_cnt = 0;

        flow_action_for_each(i, act, &flow->rule->action) {
                if (nfp_fl_check_mangle_start(&flow->rule->action, i))
                        memset(&set_act, 0, sizeof(set_act));
                err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
                                             netdev, &tun_type, &tun_out_cnt,
                                             &out_cnt, &csum_updated, &set_act);
                if (err)
                        return err;
                act_cnt++;
                if (nfp_fl_check_mangle_end(&flow->rule->action, i))
                        nfp_fl_commit_mangle(flow,
                                             &nfp_flow->action_data[act_len],
                                             &act_len, &set_act, &csum_updated);
        }

        /* The shortcut optimisation only applies while the action list
         * holds a single action; it unfortunately cannot be used once
         * there is more than one action in the list.
         */
        if (act_cnt > 1)
                nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        nfp_flow->meta.act_len = act_len;

        return 0;
}
 965