linux/drivers/net/ethernet/netronome/nfp/flower/offload.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "conntrack.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
        (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
         TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
        (FLOW_DIS_IS_FRAGMENT | \
         FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
        (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_BASIC) | \
         BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_TCP) | \
         BIT(FLOW_DISSECTOR_KEY_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_VLAN) | \
         BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
         BIT(FLOW_DISSECTOR_KEY_MPLS) | \
         BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
        (NFP_FLOWER_LAYER_PORT | \
         NFP_FLOWER_LAYER_MAC | \
         NFP_FLOWER_LAYER_TP | \
         NFP_FLOWER_LAYER_IPV4 | \
         NFP_FLOWER_LAYER_IPV6)

#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
        (NFP_FLOWER_LAYER_EXT_META | \
         NFP_FLOWER_LAYER_PORT | \
         NFP_FLOWER_LAYER_MAC | \
         NFP_FLOWER_LAYER_IPV4 | \
         NFP_FLOWER_LAYER_IPV6)

struct nfp_flower_merge_check {
        union {
                struct {
                        __be16 tci;
                        struct nfp_flower_mac_mpls l2;
                        struct nfp_flower_tp_ports l4;
                        union {
                                struct nfp_flower_ipv4 ipv4;
                                struct nfp_flower_ipv6 ipv6;
                        };
                };
                unsigned long vals[8];
        };
};

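/* Send a flow add/mod/del control message for @nfp_flow to the firmware,
 * packing the rule metadata, unmasked key, mask and action data into a
 * single cmsg of type @mtype.
 */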
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
                     u8 mtype)
{
        u32 meta_len, key_len, mask_len, act_len, tot_len;
        struct sk_buff *skb;
        unsigned char *msg;

        meta_len = sizeof(struct nfp_fl_rule_metadata);
        key_len = nfp_flow->meta.key_len;
        mask_len = nfp_flow->meta.mask_len;
        act_len = nfp_flow->meta.act_len;

        tot_len = meta_len + key_len + mask_len + act_len;

        /* Convert to long words as firmware expects
         * lengths in units of NFP_FL_LW_SIZ.
         */
        nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

        skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, &nfp_flow->meta, meta_len);
        memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
        memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
        memcpy(&msg[meta_len + key_len + mask_len],
               nfp_flow->action_data, act_len);

        /* Convert back to bytes as software expects
         * lengths in units of bytes.
         */
        nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

        nfp_ctrl_tx(app->ctrl, skb);

        return 0;
}

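/* Return true if the rule matches on any field above L2 (L3 addresses,
 * L4 ports or ICMP), which requires the EtherType to be specified.
 */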
static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);

        return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

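/* Return true if the rule matches on any field above L3 (L4 ports or
 * ICMP), which requires the IP protocol to be specified.
 */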
static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);

        return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

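/* Account for a geneve options match in the key layout, rejecting option
 * lengths that exceed what the firmware can store.
 */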
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
                          u32 *key_layer_two, int *key_size, bool ipv6,
                          struct netlink_ext_ack *extack)
{
        if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
            (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
                return -EOPNOTSUPP;
        }

        if (enc_opts->len > 0) {
                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
                *key_size += sizeof(struct nfp_flower_geneve_options);
        }

        return 0;
}

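/* Determine the UDP tunnel type from the destination port and update the
 * key layer fields and key size accordingly.
 */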
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
                              struct flow_dissector_key_enc_opts *enc_op,
                              u32 *key_layer_two, u8 *key_layer, int *key_size,
                              struct nfp_flower_priv *priv,
                              enum nfp_flower_tun_type *tun_type, bool ipv6,
                              struct netlink_ext_ack *extack)
{
        int err;

        switch (enc_ports->dst) {
        case htons(IANA_VXLAN_UDP_PORT):
                *tun_type = NFP_FL_TUNNEL_VXLAN;
                *key_layer |= NFP_FLOWER_LAYER_VXLAN;

                if (ipv6) {
                        *key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        *key_size += sizeof(struct nfp_flower_ext_meta);
                        *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                        *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
                } else {
                        *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
                }

                if (enc_op) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
                        return -EOPNOTSUPP;
                }
                break;
        case htons(GENEVE_UDP_PORT):
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
                        return -EOPNOTSUPP;
                }
                *tun_type = NFP_FL_TUNNEL_GENEVE;
                *key_layer |= NFP_FLOWER_LAYER_EXT_META;
                *key_size += sizeof(struct nfp_flower_ext_meta);
                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;

                if (ipv6) {
                        *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                        *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
                } else {
                        *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
                }

                if (!enc_op)
                        break;
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
                        return -EOPNOTSUPP;
                }
                err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
                                                ipv6, extack);
                if (err)
                        return err;
                break;
        default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
                return -EOPNOTSUPP;
        }

        return 0;
}

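/* Walk the dissector keys used by the rule and compute the key layers,
 * extended key layers and total key size required to offload it.
 */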
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
                                struct net_device *netdev,
                                struct nfp_fl_key_ls *ret_key_ls,
                                struct flow_cls_offload *flow,
                                enum nfp_flower_tun_type *tun_type,
                                struct netlink_ext_ack *extack)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        struct flow_dissector *dissector = rule->match.dissector;
        struct flow_match_basic basic = { NULL, NULL };
        struct nfp_flower_priv *priv = app->priv;
        u32 key_layer_two;
        u8 key_layer;
        int key_size;
        int err;

        if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
                return -EOPNOTSUPP;
        }

        /* If any tun dissector is used then the required set must be used. */
        if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
            (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
            (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
                return -EOPNOTSUPP;
        }

        key_layer_two = 0;
        key_layer = NFP_FLOWER_LAYER_PORT;
        key_size = sizeof(struct nfp_flower_meta_tci) +
                   sizeof(struct nfp_flower_in_port);

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
            flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
                key_layer |= NFP_FLOWER_LAYER_MAC;
                key_size += sizeof(struct nfp_flower_mac_mpls);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan vlan;

                flow_rule_match_vlan(rule, &vlan);
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
                    vlan.key->vlan_priority) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
                        return -EOPNOTSUPP;
                }
                if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
                    !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_size += sizeof(struct nfp_flower_vlan);
                        key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
                struct flow_match_vlan cvlan;

                if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
                        return -EOPNOTSUPP;
                }

                flow_rule_match_vlan(rule, &cvlan);
                if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_size += sizeof(struct nfp_flower_vlan);
                        key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_match_enc_opts enc_op = { NULL, NULL };
                struct flow_match_ipv4_addrs ipv4_addrs;
                struct flow_match_ipv6_addrs ipv6_addrs;
                struct flow_match_control enc_ctl;
                struct flow_match_ports enc_ports;
                bool ipv6_tun = false;

                flow_rule_match_enc_control(rule, &enc_ctl);

                if (enc_ctl.mask->addr_type != 0xffff) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
                        return -EOPNOTSUPP;
                }

                ipv6_tun = enc_ctl.key->addr_type ==
                                FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                if (ipv6_tun &&
                    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
                        return -EOPNOTSUPP;
                }

                if (!ipv6_tun &&
                    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
                        return -EOPNOTSUPP;
                }

                if (ipv6_tun) {
                        flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
                        if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
                                       sizeof(ipv6_addrs.mask->dst))) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
                                return -EOPNOTSUPP;
                        }
                } else {
                        flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
                        if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
                                return -EOPNOTSUPP;
                        }
                }

                if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
                        flow_rule_match_enc_opts(rule, &enc_op);

                if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                        /* check if GRE, which has no enc_ports */
                        if (!netif_is_gretap(netdev)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
                                return -EOPNOTSUPP;
                        }

                        *tun_type = NFP_FL_TUNNEL_GRE;
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_layer_two |= NFP_FLOWER_LAYER2_GRE;

                        if (ipv6_tun) {
                                key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                                key_size +=
                                        sizeof(struct nfp_flower_ipv6_udp_tun);
                        } else {
                                key_size +=
                                        sizeof(struct nfp_flower_ipv4_udp_tun);
                        }

                        if (enc_op.key) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
                                return -EOPNOTSUPP;
                        }
                } else {
                        flow_rule_match_enc_ports(rule, &enc_ports);
                        if (enc_ports.mask->dst != cpu_to_be16(~0)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
                                return -EOPNOTSUPP;
                        }

                        err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
                                                            enc_op.key,
                                                            &key_layer_two,
                                                            &key_layer,
                                                            &key_size, priv,
                                                            tun_type, ipv6_tun,
                                                            extack);
                        if (err)
                                return err;

                        /* Ensure the ingress netdev matches the expected
                         * tun type.
                         */
                        if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
                flow_rule_match_basic(rule, &basic);

        if (basic.mask && basic.mask->n_proto) {
                /* Ethernet type is present in the key. */
                switch (basic.key->n_proto) {
                case cpu_to_be16(ETH_P_IP):
                        key_layer |= NFP_FLOWER_LAYER_IPV4;
                        key_size += sizeof(struct nfp_flower_ipv4);
                        break;

                case cpu_to_be16(ETH_P_IPV6):
                        key_layer |= NFP_FLOWER_LAYER_IPV6;
                        key_size += sizeof(struct nfp_flower_ipv6);
                        break;

                /* Currently we do not offload ARP
                 * because we rely on it to get to the host.
                 */
                case cpu_to_be16(ETH_P_ARP):
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
                        return -EOPNOTSUPP;

                case cpu_to_be16(ETH_P_MPLS_UC):
                case cpu_to_be16(ETH_P_MPLS_MC):
                        if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                                key_layer |= NFP_FLOWER_LAYER_MAC;
                                key_size += sizeof(struct nfp_flower_mac_mpls);
                        }
                        break;

                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;

                default:
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
                        return -EOPNOTSUPP;
                }
        } else if (nfp_flower_check_higher_than_mac(flow)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
                return -EOPNOTSUPP;
        }

        if (basic.mask && basic.mask->ip_proto) {
                switch (basic.key->ip_proto) {
                case IPPROTO_TCP:
                case IPPROTO_UDP:
                case IPPROTO_SCTP:
                case IPPROTO_ICMP:
                case IPPROTO_ICMPV6:
                        key_layer |= NFP_FLOWER_LAYER_TP;
                        key_size += sizeof(struct nfp_flower_tp_ports);
                        break;
                }
        }

        if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
            nfp_flower_check_higher_than_l3(flow)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_match_tcp tcp;
                u32 tcp_flags;

                flow_rule_match_tcp(rule, &tcp);
                tcp_flags = be16_to_cpu(tcp.key->flags);

                if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
                        return -EOPNOTSUPP;
                }

                /* We only support PSH and URG flags when either
                 * FIN, SYN or RST is present as well.
                 */
                if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
                    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
                        return -EOPNOTSUPP;
                }

                /* We need to store TCP flags in either the IPv4 or IPv6 key
                 * space, thus we need to ensure we include an IPv4/IPv6 key
                 * layer if we have not done so already.
                 */
                if (!basic.key) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
                        return -EOPNOTSUPP;
                }

                if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
                    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
                        switch (basic.key->n_proto) {
                        case cpu_to_be16(ETH_P_IP):
                                key_layer |= NFP_FLOWER_LAYER_IPV4;
                                key_size += sizeof(struct nfp_flower_ipv4);
                                break;

                        case cpu_to_be16(ETH_P_IPV6):
                                key_layer |= NFP_FLOWER_LAYER_IPV6;
                                key_size += sizeof(struct nfp_flower_ipv6);
                                break;

                        default:
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control ctl;

                flow_rule_match_control(rule, &ctl);
                if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
                        return -EOPNOTSUPP;
                }
        }

        ret_key_ls->key_layer = key_layer;
        ret_key_ls->key_layer_two = key_layer_two;
        ret_key_ls->key_size = key_size;

        return 0;
}

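/* Allocate a flow payload, sizing the unmasked key, mask and action
 * buffers from the computed key layers.
 */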
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
        struct nfp_fl_payload *flow_pay;

        flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
        if (!flow_pay)
                return NULL;

        flow_pay->meta.key_len = key_layer->key_size;
        flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->unmasked_data)
                goto err_free_flow;

        flow_pay->meta.mask_len = key_layer->key_size;
        flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->mask_data)
                goto err_free_unmasked;

        flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
        if (!flow_pay->action_data)
                goto err_free_mask;

        flow_pay->nfp_tun_ipv4_addr = 0;
        flow_pay->nfp_tun_ipv6 = NULL;
        flow_pay->meta.flags = 0;
        INIT_LIST_HEAD(&flow_pay->linked_flows);
        flow_pay->in_hw = false;
        flow_pay->pre_tun_rule.dev = NULL;

        return flow_pay;

err_free_mask:
        kfree(flow_pay->mask_data);
err_free_unmasked:
        kfree(flow_pay->unmasked_data);
err_free_flow:
        kfree(flow_pay);
        return NULL;
}

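/* Fold the effect of @flow's actions into @merge: every field that an
 * action may rewrite is marked as fully matchable so a subsequent flow
 * can safely match on it. Also report the last action id seen and count
 * the output actions.
 */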
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
                                     struct nfp_flower_merge_check *merge,
                                     u8 *last_act_id, int *act_out)
{
        struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
        struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
        struct nfp_fl_set_ip4_addrs *ipv4_add;
        struct nfp_fl_set_ipv6_addr *ipv6_add;
        struct nfp_fl_push_vlan *push_vlan;
        struct nfp_fl_pre_tunnel *pre_tun;
        struct nfp_fl_set_tport *tport;
        struct nfp_fl_set_eth *eth;
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;
        bool ipv6_tun = false;
        u8 act_id = 0;
        u8 *ports;
        int i;

        while (act_off < flow->meta.act_len) {
                a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
                act_id = a->jump_id;

                switch (act_id) {
                case NFP_FL_ACTION_OPCODE_OUTPUT:
                        if (act_out)
                                (*act_out)++;
                        break;
                case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
                        push_vlan = (struct nfp_fl_push_vlan *)a;
                        if (push_vlan->vlan_tci)
                                merge->tci = cpu_to_be16(0xffff);
                        break;
                case NFP_FL_ACTION_OPCODE_POP_VLAN:
                        merge->tci = cpu_to_be16(0);
                        break;
                case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
                        /* New tunnel header means l2 to l4 can be matched. */
                        eth_broadcast_addr(&merge->l2.mac_dst[0]);
                        eth_broadcast_addr(&merge->l2.mac_src[0]);
                        memset(&merge->l4, 0xff,
                               sizeof(struct nfp_flower_tp_ports));
                        if (ipv6_tun)
                                memset(&merge->ipv6, 0xff,
                                       sizeof(struct nfp_flower_ipv6));
                        else
                                memset(&merge->ipv4, 0xff,
                                       sizeof(struct nfp_flower_ipv4));
                        break;
                case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
                        eth = (struct nfp_fl_set_eth *)a;
                        for (i = 0; i < ETH_ALEN; i++)
                                merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
                        for (i = 0; i < ETH_ALEN; i++)
                                merge->l2.mac_src[i] |=
                                        eth->eth_addr_mask[ETH_ALEN + i];
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
                        ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
                        merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
                        merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
                        ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
                        merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
                        merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
                        ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
                        for (i = 0; i < 4; i++)
                                merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
                                        ipv6_add->ipv6[i].mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
                        ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
                        for (i = 0; i < 4; i++)
                                merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
                                        ipv6_add->ipv6[i].mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
                        ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
                        merge->ipv6.ip_ext.ttl |=
                                ipv6_tc_hl_fl->ipv6_hop_limit_mask;
                        merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
                        merge->ipv6.ipv6_flow_label_exthdr |=
                                ipv6_tc_hl_fl->ipv6_label_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_UDP:
                case NFP_FL_ACTION_OPCODE_SET_TCP:
                        tport = (struct nfp_fl_set_tport *)a;
                        ports = (u8 *)&merge->l4.port_src;
                        for (i = 0; i < 4; i++)
                                ports[i] |= tport->tp_port_mask[i];
                        break;
                case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
                        pre_tun = (struct nfp_fl_pre_tunnel *)a;
                        ipv6_tun = be16_to_cpu(pre_tun->flags) &
                                        NFP_FL_PRE_TUN_IPV6;
                        break;
                case NFP_FL_ACTION_OPCODE_PRE_LAG:
                case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        if (last_act_id)
                *last_act_id = act_id;

        return 0;
}

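/* Copy the match masks of @flow into @merge so the merge check can
 * compare what each sub_flow matches on.
 */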
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
                                struct nfp_flower_merge_check *merge,
                                bool extra_fields)
{
        struct nfp_flower_meta_tci *meta_tci;
        u8 *mask = flow->mask_data;
        u8 key_layer, match_size;

        memset(merge, 0, sizeof(struct nfp_flower_merge_check));

        meta_tci = (struct nfp_flower_meta_tci *)mask;
        key_layer = meta_tci->nfp_flow_key_layer;

        if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
                return -EOPNOTSUPP;

        merge->tci = meta_tci->tci;
        mask += sizeof(struct nfp_flower_meta_tci);

        if (key_layer & NFP_FLOWER_LAYER_EXT_META)
                mask += sizeof(struct nfp_flower_ext_meta);

        mask += sizeof(struct nfp_flower_in_port);

        if (key_layer & NFP_FLOWER_LAYER_MAC) {
                match_size = sizeof(struct nfp_flower_mac_mpls);
                memcpy(&merge->l2, mask, match_size);
                mask += match_size;
        }

        if (key_layer & NFP_FLOWER_LAYER_TP) {
                match_size = sizeof(struct nfp_flower_tp_ports);
                memcpy(&merge->l4, mask, match_size);
                mask += match_size;
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV4) {
                match_size = sizeof(struct nfp_flower_ipv4);
                memcpy(&merge->ipv4, mask, match_size);
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV6) {
                match_size = sizeof(struct nfp_flower_ipv6);
                memcpy(&merge->ipv6, mask, match_size);
        }

        return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
                     struct nfp_fl_payload *sub_flow2)
{
        /* Two flows can be merged if sub_flow2 only matches on bits that are
         * either matched by sub_flow1 or set by a sub_flow1 action. This
         * ensures that every packet that hits sub_flow1 and recirculates is
         * guaranteed to hit sub_flow2.
         */
        struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
        int err, act_out = 0;
        u8 last_act_id = 0;

        err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
                                              true);
        if (err)
                return err;

        err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
                                              false);
        if (err)
                return err;

        err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
                                                   &last_act_id, &act_out);
        if (err)
                return err;

        /* Must only be 1 output action and it must be the last in sequence. */
        if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
                return -EOPNOTSUPP;

        /* Reject merge if sub_flow2 matches on something that is not matched
         * on or set in an action by sub_flow1.
         */
        err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
                            sub_flow1_merge.vals,
                            sizeof(struct nfp_flower_merge_check) * 8);
        if (err)
                return -EINVAL;

        return 0;
}

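/* Copy any leading pre-actions (pre-tunnel, pre-lag) from @act_src to
 * @act_dst, flagging whether a pre-tunnel action was seen, and return
 * the number of bytes copied.
 */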
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
                            bool *tunnel_act)
{
        unsigned int act_off = 0, act_len;
        struct nfp_fl_act_head *a;
        u8 act_id = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&act_src[act_off];
                act_len = a->len_lw << NFP_FL_LW_SIZ;
                act_id = a->jump_id;

                switch (act_id) {
                case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
                        if (tunnel_act)
                                *tunnel_act = true;
                        fallthrough;
                case NFP_FL_ACTION_OPCODE_PRE_LAG:
                        memcpy(act_dst + act_off, act_src + act_off, act_len);
                        break;
                default:
                        return act_off;
                }

                act_off += act_len;
        }

        return act_off;
}

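/* After a tunnel push, only an optional leading VLAN push followed by
 * output actions is a valid sequence; verify this and report any VLAN
 * push found.
 */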
static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&acts[act_off];

                if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
                        *vlan = (struct nfp_fl_push_vlan *)a;
                else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
                        return -EOPNOTSUPP;

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        /* Ensure any VLAN push also has an egress action. */
        if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
                return -EOPNOTSUPP;

        return 0;
}

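/* Fold a post-tunnel VLAN push into the tunnel set action by copying its
 * TPID and TCI into the outer VLAN fields of the tunnel header.
 */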
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
        struct nfp_fl_set_tun *tun;
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&acts[act_off];

                if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
                        tun = (struct nfp_fl_set_tun *)a;
                        tun->outer_vlan_tpid = vlan->vlan_tpid;
                        tun->outer_vlan_tci = vlan->vlan_tci;

                        return 0;
                }

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        /* Return error if no tunnel action is found. */
        return -EOPNOTSUPP;
}

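/* Build the merged action list: pre-actions first, then the remaining
 * actions of sub_flow1 (minus its final output) followed by those of
 * sub_flow2.
 */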
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
                        struct nfp_fl_payload *sub_flow2,
                        struct nfp_fl_payload *merge_flow)
{
        unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
        struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
        bool tunnel_act = false;
        char *merge_act;
        int err;

        /* The last action of sub_flow1 must be output - do not merge this. */
        sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
        sub2_act_len = sub_flow2->meta.act_len;

        if (!sub2_act_len)
                return -EINVAL;

        if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
                return -EINVAL;

        /* A shortcut can only be applied if there is a single action. */
        if (sub1_act_len)
                merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
        else
                merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

        merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
        merge_act = merge_flow->action_data;

        /* Copy any pre-actions to the start of merge flow action list. */
        pre_off1 = nfp_flower_copy_pre_actions(merge_act,
                                               sub_flow1->action_data,
                                               sub1_act_len, &tunnel_act);
        merge_act += pre_off1;
        sub1_act_len -= pre_off1;
        pre_off2 = nfp_flower_copy_pre_actions(merge_act,
                                               sub_flow2->action_data,
                                               sub2_act_len, NULL);
        merge_act += pre_off2;
        sub2_act_len -= pre_off2;

        /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
         * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
         * valid merge.
         */
        if (tunnel_act) {
                char *post_tun_acts = &sub_flow2->action_data[pre_off2];

                err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
                                                  &post_tun_push_vlan);
                if (err)
                        return err;

                if (post_tun_push_vlan) {
                        pre_off2 += sizeof(*post_tun_push_vlan);
                        sub2_act_len -= sizeof(*post_tun_push_vlan);
                }
        }

        /* Copy remaining actions from sub_flows 1 and 2. */
        memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);

        if (post_tun_push_vlan) {
                /* Update tunnel action in merge to include VLAN push. */
                err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
                                                 post_tun_push_vlan);
                if (err)
                        return err;

                merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
        }

        merge_act += sub1_act_len;
        memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

        return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
        list_del(&link->merge_flow.list);
        list_del(&link->sub_flow.list);
        kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
                                    struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
                if (link->sub_flow.flow == sub_flow) {
                        nfp_flower_unlink_flow(link);
                        return;
                }
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
                                 struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        link = kmalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        link->merge_flow.flow = merge_flow;
        list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
        link->sub_flow.flow = sub_flow;
        list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

        return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge two existing flows into one.
 * @app:        Pointer to the APP handle
 * @sub_flow1:  Initial flow matched to produce merge hint
 * @sub_flow2:  Post recirculation flow matched in merge hint
 *
 * Combines two flows (if valid) into a single flow, removing the initial
 * flow from hw and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
                                     struct nfp_fl_payload *sub_flow1,
                                     struct nfp_fl_payload *sub_flow2)
{
        struct flow_cls_offload merge_tc_off;
        struct nfp_flower_priv *priv = app->priv;
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *merge_flow;
        struct nfp_fl_key_ls merge_key_ls;
        struct nfp_merge_info *merge_info;
        u64 parent_ctx = 0;
        int err;

        ASSERT_RTNL();

        extack = merge_tc_off.common.extack;
        if (sub_flow1 == sub_flow2 ||
            nfp_flower_is_merge_flow(sub_flow1) ||
            nfp_flower_is_merge_flow(sub_flow2))
                return -EINVAL;

        /* check if the two flows are already merged */
        parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
        parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
        if (rhashtable_lookup_fast(&priv->merge_table,
                                   &parent_ctx, merge_table_params)) {
                nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
                return 0;
        }

        err = nfp_flower_can_merge(sub_flow1, sub_flow2);
        if (err)
                return err;

        merge_key_ls.key_size = sub_flow1->meta.key_len;

        merge_flow = nfp_flower_allocate_new(&merge_key_ls);
        if (!merge_flow)
                return -ENOMEM;

        merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
        merge_flow->ingress_dev = sub_flow1->ingress_dev;

        memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
               sub_flow1->meta.key_len);
        memcpy(merge_flow->mask_data, sub_flow1->mask_data,
               sub_flow1->meta.mask_len);

        err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
        if (err)
                goto err_destroy_merge_flow;

        err = nfp_flower_link_flows(merge_flow, sub_flow1);
        if (err)
                goto err_destroy_merge_flow;

        err = nfp_flower_link_flows(merge_flow, sub_flow2);
        if (err)
                goto err_unlink_sub_flow1;

        merge_tc_off.cookie = merge_flow->tc_flower_cookie;
        err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
                                        merge_flow->ingress_dev, extack);
        if (err)
                goto err_unlink_sub_flow2;

        err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
                                     nfp_flower_table_params);
        if (err)
                goto err_release_metadata;

        merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
        if (!merge_info) {
                err = -ENOMEM;
                goto err_remove_rhash;
        }
        merge_info->parent_ctx = parent_ctx;
        err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
                                     merge_table_params);
        if (err)
                goto err_destroy_merge_info;

        err = nfp_flower_xmit_flow(app, merge_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
        if (err)
                goto err_remove_merge_info;

        merge_flow->in_hw = true;
        sub_flow1->in_hw = false;

        return 0;

err_remove_merge_info:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
                                            &merge_info->ht_node,
                                            merge_table_params));
err_destroy_merge_info:
        kfree(merge_info);
err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
                                            nfp_flower_table_params));
err_release_metadata:
        nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
        nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
        nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
        kfree(merge_flow->unmasked_data);
        kfree(merge_flow);
        return err;
}

/**
 * nfp_flower_validate_pre_tun_rule() - Verify a flow as a pre-tunnel rule.
 * @app:        Pointer to the APP handle
 * @flow:       Pointer to NFP flow representation of rule
 * @key_ls:     Pointer to NFP key layers structure
 * @extack:     Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
                                 struct nfp_fl_payload *flow,
                                 struct nfp_fl_key_ls *key_ls,
                                 struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_meta_tci *meta_tci;
        struct nfp_flower_mac_mpls *mac;
        u8 *ext = flow->unmasked_data;
        struct nfp_fl_act_head *act;
        u8 *mask = flow->mask_data;
        bool vlan = false;
        int act_offset;
        u8 key_layer;

        meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
        key_layer = key_ls->key_layer;
        if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
                if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
                        u16 vlan_tci = be16_to_cpu(meta_tci->tci);

                        vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
                        vlan = true;
                } else {
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
                }
        }

        if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
                return -EOPNOTSUPP;
        } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
                return -EOPNOTSUPP;
        }

        if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
                return -EOPNOTSUPP;
        }

        if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
            !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
                return -EOPNOTSUPP;
        }

        /* Skip fields known to exist. */
        mask += sizeof(struct nfp_flower_meta_tci);
        ext += sizeof(struct nfp_flower_meta_tci);
        if (key_ls->key_layer_two) {
                mask += sizeof(struct nfp_flower_ext_meta);
                ext += sizeof(struct nfp_flower_ext_meta);
        }
        mask += sizeof(struct nfp_flower_in_port);
        ext += sizeof(struct nfp_flower_in_port);

        /* Ensure destination MAC address matches pre_tun_dev. */
        mac = (struct nfp_flower_mac_mpls *)ext;
        if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr,
                   ETH_ALEN)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
                return -EOPNOTSUPP;
        }

        /* Ensure destination MAC address is fully matched. */
        mac = (struct nfp_flower_mac_mpls *)mask;
        if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
                return -EOPNOTSUPP;
        }

        if (mac->mpls_lse) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
                return -EOPNOTSUPP;
        }

        mask += sizeof(struct nfp_flower_mac_mpls);
        ext += sizeof(struct nfp_flower_mac_mpls);
        if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
            key_layer & NFP_FLOWER_LAYER_IPV6) {
                /* Flags and proto fields have same offset in IPv4 and IPv6. */
                int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
                int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
                int size;
                int i;

                size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
                        sizeof(struct nfp_flower_ipv4) :
                        sizeof(struct nfp_flower_ipv6);

                /* Ensure proto and flags are the only IP layer fields. */
                for (i = 0; i < size; i++)
                        if (mask[i] && i != ip_flags && i != ip_proto) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
                                return -EOPNOTSUPP;
                        }
                ext += size;
                mask += size;
        }

        if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ) {
                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
                        struct nfp_flower_vlan *vlan_tags;
                        u16 vlan_tci;

                        vlan_tags = (struct nfp_flower_vlan *)ext;

                        vlan_tci = be16_to_cpu(vlan_tags->outer_tci);

                        vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
                        vlan = true;
                } else {
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
                }
        }

        /* Action must be a single egress or pop_vlan and egress. */
        act_offset = 0;
        act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
        if (vlan) {
                if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
                        return -EOPNOTSUPP;
                }

                act_offset += act->len_lw << NFP_FL_LW_SIZ;
                act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
        }

        if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
                return -EOPNOTSUPP;
        }

        act_offset += act->len_lw << NFP_FL_LW_SIZ;

        /* Ensure there are no more actions after egress. */
        if (act_offset != flow->meta.act_len) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
                return -EOPNOTSUPP;
        }

        return 0;
}

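/* Reject flows that use the conntrack dissector key or a non-zero chain
 * index before attempting a regular offload.
 */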
1280static bool offload_pre_check(struct flow_cls_offload *flow)
1281{
1282        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
1283        struct flow_dissector *dissector = rule->match.dissector;
1284
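            /* Reject matches on conntrack state and rules on non-zero chains. */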
1285        if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
1286                return false;
1287
1288        if (flow->common.chain_index)
1289                return false;
1290
1291        return true;
1292}
1293
1294/**
1295 * nfp_flower_add_offload() - Adds a new flow to hardware.
1296 * @app:        Pointer to the APP handle
1297 * @netdev:     netdev structure.
1298 * @flow:       TC flower classifier offload structure.
1299 *
1300 * Adds a new flow to the flow rhashtable and builds its match/action payloads.
1301 *
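     * A rule typically reaches this path from a tc flower command, e.g.
     * (illustrative only; device names are placeholders):
     *
     *   tc filter add dev $REPR ingress protocol ip flower ip_proto tcp \
     *           action mirred egress redirect dev $REPR2
     *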
1302 * Return: negative value on error, 0 if configured successfully.
1303 */
1304static int
1305nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
1306                       struct flow_cls_offload *flow)
1307{
1308        enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
1309        struct nfp_flower_priv *priv = app->priv;
1310        struct netlink_ext_ack *extack = NULL;
1311        struct nfp_fl_payload *flow_pay;
1312        struct nfp_fl_key_ls *key_layer;
1313        struct nfp_port *port = NULL;
1314        int err;
1315
1316        extack = flow->common.extack;
1317        if (nfp_netdev_is_nfp_repr(netdev))
1318                port = nfp_port_from_netdev(netdev);
1319
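            /* Conntrack flows are diverted to their own offload paths. */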
1320        if (is_pre_ct_flow(flow))
1321                return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack);
1322
1323        if (is_post_ct_flow(flow))
1324                return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);
1325
1326        if (!offload_pre_check(flow))
1327                return -EOPNOTSUPP;
1328
1329        key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
1330        if (!key_layer)
1331                return -ENOMEM;
1332
1333        err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
1334                                              &tun_type, extack);
1335        if (err)
1336                goto err_free_key_ls;
1337
1338        flow_pay = nfp_flower_allocate_new(key_layer);
1339        if (!flow_pay) {
1340                err = -ENOMEM;
1341                goto err_free_key_ls;
1342        }
1343
1344        err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
1345                                            flow_pay, tun_type, extack);
1346        if (err)
1347                goto err_destroy_flow;
1348
1349        err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
1350        if (err)
1351                goto err_destroy_flow;
1352
1353        if (flow_pay->pre_tun_rule.dev) {
1354                err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
1355                if (err)
1356                        goto err_destroy_flow;
1357        }
1358
1359        err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
1360        if (err)
1361                goto err_destroy_flow;
1362
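            /* The TC cookie is the lookup key for later delete and stats requests. */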
1363        flow_pay->tc_flower_cookie = flow->cookie;
1364        err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
1365                                     nfp_flower_table_params);
1366        if (err) {
1367                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
1368                goto err_release_metadata;
1369        }
1370
1371        if (flow_pay->pre_tun_rule.dev)
1372                err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
1373        else
1374                err = nfp_flower_xmit_flow(app, flow_pay,
1375                                           NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
1376        if (err)
1377                goto err_remove_rhash;
1378
1379        if (port)
1380                port->tc_offload_cnt++;
1381
1382        flow_pay->in_hw = true;
1383
1384        /* Only key_layer is freed here; the flow payload lives until the rule is destroyed. */
1385        kfree(key_layer);
1386
1387        return 0;
1388
1389err_remove_rhash:
1390        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1391                                            &flow_pay->fl_node,
1392                                            nfp_flower_table_params));
1393err_release_metadata:
1394        nfp_modify_flow_metadata(app, flow_pay);
1395err_destroy_flow:
1396        if (flow_pay->nfp_tun_ipv6)
1397                nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
1398        kfree(flow_pay->action_data);
1399        kfree(flow_pay->mask_data);
1400        kfree(flow_pay->unmasked_data);
1401        kfree(flow_pay);
1402err_free_key_ls:
1403        kfree(key_layer);
1404        return err;
1405}
1406
1407static void
1408nfp_flower_remove_merge_flow(struct nfp_app *app,
1409                             struct nfp_fl_payload *del_sub_flow,
1410                             struct nfp_fl_payload *merge_flow)
1411{
1412        struct nfp_flower_priv *priv = app->priv;
1413        struct nfp_fl_payload_link *link, *temp;
1414        struct nfp_merge_info *merge_info;
1415        struct nfp_fl_payload *origin;
1416        u64 parent_ctx = 0;
1417        bool mod = false;
1418        int err;
1419
1420        link = list_first_entry(&merge_flow->linked_flows,
1421                                struct nfp_fl_payload_link, merge_flow.list);
1422        origin = link->sub_flow.flow;
1423
1424        /* Re-add the rule that the merge had overwritten, unless it is itself being deleted. */
1425        if (origin != del_sub_flow)
1426                mod = true;
1427
1428        err = nfp_modify_flow_metadata(app, merge_flow);
1429        if (err) {
1430                nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
1431                goto err_free_links;
1432        }
1433
1434        if (!mod) {
1435                err = nfp_flower_xmit_flow(app, merge_flow,
1436                                           NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1437                if (err) {
1438                        nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
1439                        goto err_free_links;
1440                }
1441        } else {
1442                __nfp_modify_flow_metadata(priv, origin);
1443                err = nfp_flower_xmit_flow(app, origin,
1444                                           NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
1445                if (err)
1446                        nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
1447                origin->in_hw = true;
1448        }
1449
1450err_free_links:
1451        /* Clean any links connected with the merged flow. */
1452        list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
1453                                 merge_flow.list) {
1454                u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
1455
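                    /* Pack both sub-flow context ids into the 64-bit merge table key. */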
1456                parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
1457                nfp_flower_unlink_flow(link);
1458        }
1459
1460        merge_info = rhashtable_lookup_fast(&priv->merge_table,
1461                                            &parent_ctx,
1462                                            merge_table_params);
1463        if (merge_info) {
1464                WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
1465                                                    &merge_info->ht_node,
1466                                                    merge_table_params));
1467                kfree(merge_info);
1468        }
1469
1470        kfree(merge_flow->action_data);
1471        kfree(merge_flow->mask_data);
1472        kfree(merge_flow->unmasked_data);
1473        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1474                                            &merge_flow->fl_node,
1475                                            nfp_flower_table_params));
1476        kfree_rcu(merge_flow, rcu);
1477}
1478
1479static void
1480nfp_flower_del_linked_merge_flows(struct nfp_app *app,
1481                                  struct nfp_fl_payload *sub_flow)
1482{
1483        struct nfp_fl_payload_link *link, *temp;
1484
1485        /* Remove any merge flow formed from the deleted sub_flow. */
1486        list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
1487                                 sub_flow.list)
1488                nfp_flower_remove_merge_flow(app, sub_flow,
1489                                             link->merge_flow.flow);
1490}
1491
1492/**
1493 * nfp_flower_del_offload() - Removes a flow from hardware.
1494 * @app:        Pointer to the APP handle
1495 * @netdev:     netdev structure.
1496 * @flow:       TC flower classifier offload structure
1497 *
1498 * Removes a flow from the driver's flow rhashtable and frees its
1499 * action payload. Any merge flows formed from this flow are also deleted.
1500 *
1501 * Return: negative value on error, 0 if removed successfully.
1502 */
1503static int
1504nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
1505                       struct flow_cls_offload *flow)
1506{
1507        struct nfp_flower_priv *priv = app->priv;
1508        struct nfp_fl_ct_map_entry *ct_map_ent;
1509        struct netlink_ext_ack *extack = NULL;
1510        struct nfp_fl_payload *nfp_flow;
1511        struct nfp_port *port = NULL;
1512        int err;
1513
1514        extack = flow->common.extack;
1515        if (nfp_netdev_is_nfp_repr(netdev))
1516                port = nfp_port_from_netdev(netdev);
1517
1518        /* Check ct_map_table */
1519        ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
1520                                            nfp_ct_map_params);
1521        if (ct_map_ent) {
1522                err = nfp_fl_ct_del_flow(ct_map_ent);
1523                return err;
1524        }
1525
1526        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1527        if (!nfp_flow) {
1528                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
1529                return -ENOENT;
1530        }
1531
1532        err = nfp_modify_flow_metadata(app, nfp_flow);
1533        if (err)
1534                goto err_free_merge_flow;
1535
1536        if (nfp_flow->nfp_tun_ipv4_addr)
1537                nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
1538
1539        if (nfp_flow->nfp_tun_ipv6)
1540                nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);
1541
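            /* If the flow never made it into hardware there is nothing to delete there. */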
1542        if (!nfp_flow->in_hw) {
1543                err = 0;
1544                goto err_free_merge_flow;
1545        }
1546
1547        if (nfp_flow->pre_tun_rule.dev)
1548                err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
1549        else
1550                err = nfp_flower_xmit_flow(app, nfp_flow,
1551                                           NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1552        /* Fall through on error. */
1553
1554err_free_merge_flow:
1555        nfp_flower_del_linked_merge_flows(app, nfp_flow);
1556        if (port)
1557                port->tc_offload_cnt--;
1558        kfree(nfp_flow->action_data);
1559        kfree(nfp_flow->mask_data);
1560        kfree(nfp_flow->unmasked_data);
1561        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1562                                            &nfp_flow->fl_node,
1563                                            nfp_flower_table_params));
1564        kfree_rcu(nfp_flow, rcu);
1565        return err;
1566}
1567
1568static void
1569__nfp_flower_update_merge_stats(struct nfp_app *app,
1570                                struct nfp_fl_payload *merge_flow)
1571{
1572        struct nfp_flower_priv *priv = app->priv;
1573        struct nfp_fl_payload_link *link;
1574        struct nfp_fl_payload *sub_flow;
1575        u64 pkts, bytes, used;
1576        u32 ctx_id;
1577
1578        ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
1579        pkts = priv->stats[ctx_id].pkts;
1580        /* Do not cycle subflows if no stats to distribute. */
1581        if (!pkts)
1582                return;
1583        bytes = priv->stats[ctx_id].bytes;
1584        used = priv->stats[ctx_id].used;
1585
1586        /* Reset stats for the merge flow. */
1587        priv->stats[ctx_id].pkts = 0;
1588        priv->stats[ctx_id].bytes = 0;
1589
1590        /* The merge flow has received stats updates from firmware.
1591         * Distribute these stats to all subflows that form the merge.
1592         * The stats will then be collected by TC via the subflows.
1593         */
1594        list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
1595                sub_flow = link->sub_flow.flow;
1596                ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
1597                priv->stats[ctx_id].pkts += pkts;
1598                priv->stats[ctx_id].bytes += bytes;
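                    /* Keep whichever "last used" timestamp is most recent. */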
1599                priv->stats[ctx_id].used = max_t(u64, used,
1600                                                 priv->stats[ctx_id].used);
1601        }
1602}
1603
1604static void
1605nfp_flower_update_merge_stats(struct nfp_app *app,
1606                              struct nfp_fl_payload *sub_flow)
1607{
1608        struct nfp_fl_payload_link *link;
1609
1610        /* Distribute the stats of every merge flow that this subflow is part of. */
1611        list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
1612                __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
1613}
1614
1615/**
1616 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
1617 * @app:        Pointer to the APP handle
1618 * @netdev:     Netdev structure.
1619 * @flow:       TC flower classifier offload structure
1620 *
1621 * Populates a flow statistics structure which corresponds to a
1622 * specific flow.
1623 *
1624 * Return: negative value on error, 0 if stats populated successfully.
1625 */
1626static int
1627nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
1628                     struct flow_cls_offload *flow)
1629{
1630        struct nfp_flower_priv *priv = app->priv;
1631        struct netlink_ext_ack *extack = NULL;
1632        struct nfp_fl_payload *nfp_flow;
1633        u32 ctx_id;
1634
1635        extack = flow->common.extack;
1636        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1637        if (!nfp_flow) {
1638                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
1639                return -EINVAL;
1640        }
1641
1642        ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
1643
1644        spin_lock_bh(&priv->stats_lock);
1645        /* If request is for a sub_flow, update stats from merged flows. */
1646        if (!list_empty(&nfp_flow->linked_flows))
1647                nfp_flower_update_merge_stats(app, nfp_flow);
1648
1649        flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
1650                          priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
1651                          FLOW_ACTION_HW_STATS_DELAYED);
1652
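            /* Reported stats are deltas; zero the counters so the next dump only sees new traffic. */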
1653        priv->stats[ctx_id].pkts = 0;
1654        priv->stats[ctx_id].bytes = 0;
1655        spin_unlock_bh(&priv->stats_lock);
1656
1657        return 0;
1658}
1659
1660static int
1661nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
1662                        struct flow_cls_offload *flower)
1663{
1664        if (!eth_proto_is_802_3(flower->common.protocol))
1665                return -EOPNOTSUPP;
1666
1667        switch (flower->command) {
1668        case FLOW_CLS_REPLACE:
1669                return nfp_flower_add_offload(app, netdev, flower);
1670        case FLOW_CLS_DESTROY:
1671                return nfp_flower_del_offload(app, netdev, flower);
1672        case FLOW_CLS_STATS:
1673                return nfp_flower_get_stats(app, netdev, flower);
1674        default:
1675                return -EOPNOTSUPP;
1676        }
1677}
1678
1679static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
1680                                        void *type_data, void *cb_priv)
1681{
1682        struct flow_cls_common_offload *common = type_data;
1683        struct nfp_repr *repr = cb_priv;
1684
1685        if (!tc_can_offload_extack(repr->netdev, common->extack))
1686                return -EOPNOTSUPP;
1687
1688        switch (type) {
1689        case TC_SETUP_CLSFLOWER:
1690                return nfp_flower_repr_offload(repr->app, repr->netdev,
1691                                               type_data);
1692        case TC_SETUP_CLSMATCHALL:
1693                return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
1694                                                    type_data);
1695        default:
1696                return -EOPNOTSUPP;
1697        }
1698}
1699
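    /* Driver-wide list of bound flow block callbacks, used to detect duplicate binds. */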
1700static LIST_HEAD(nfp_block_cb_list);
1701
1702static int nfp_flower_setup_tc_block(struct net_device *netdev,
1703                                     struct flow_block_offload *f)
1704{
1705        struct nfp_repr *repr = netdev_priv(netdev);
1706        struct nfp_flower_repr_priv *repr_priv;
1707        struct flow_block_cb *block_cb;
1708
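            /* Reprs only support offload of blocks bound to the ingress hook. */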
1709        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1710                return -EOPNOTSUPP;
1711
1712        repr_priv = repr->app_priv;
1713        repr_priv->block_shared = f->block_shared;
1714        f->driver_block_list = &nfp_block_cb_list;
1715
1716        switch (f->command) {
1717        case FLOW_BLOCK_BIND:
1718                if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
1719                                          &nfp_block_cb_list))
1720                        return -EBUSY;
1721
1722                block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
1723                                               repr, repr, NULL);
1724                if (IS_ERR(block_cb))
1725                        return PTR_ERR(block_cb);
1726
1727                flow_block_cb_add(block_cb, f);
1728                list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1729                return 0;
1730        case FLOW_BLOCK_UNBIND:
1731                block_cb = flow_block_cb_lookup(f->block,
1732                                                nfp_flower_setup_tc_block_cb,
1733                                                repr);
1734                if (!block_cb)
1735                        return -ENOENT;
1736
1737                flow_block_cb_remove(block_cb, f);
1738                list_del(&block_cb->driver_list);
1739                return 0;
1740        default:
1741                return -EOPNOTSUPP;
1742        }
1743}
1744
1745int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
1746                        enum tc_setup_type type, void *type_data)
1747{
1748        switch (type) {
1749        case TC_SETUP_BLOCK:
1750                return nfp_flower_setup_tc_block(netdev, type_data);
1751        default:
1752                return -EOPNOTSUPP;
1753        }
1754}
1755
1756struct nfp_flower_indr_block_cb_priv {
1757        struct net_device *netdev;
1758        struct nfp_app *app;
1759        struct list_head list;
1760};
1761
1762static struct nfp_flower_indr_block_cb_priv *
1763nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
1764                                     struct net_device *netdev)
1765{
1766        struct nfp_flower_indr_block_cb_priv *cb_priv;
1767        struct nfp_flower_priv *priv = app->priv;
1768
1769        /* All callback list access should be protected by RTNL. */
1770        ASSERT_RTNL();
1771
1772        list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
1773                if (cb_priv->netdev == netdev)
1774                        return cb_priv;
1775
1776        return NULL;
1777}
1778
1779static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
1780                                          void *type_data, void *cb_priv)
1781{
1782        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1783
1784        switch (type) {
1785        case TC_SETUP_CLSFLOWER:
1786                return nfp_flower_repr_offload(priv->app, priv->netdev,
1787                                               type_data);
1788        default:
1789                return -EOPNOTSUPP;
1790        }
1791}
1792
1793void nfp_flower_setup_indr_tc_release(void *cb_priv)
1794{
1795        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1796
1797        list_del(&priv->list);
1798        kfree(priv);
1799}
1800
1801static int
1802nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
1803                               struct flow_block_offload *f, void *data,
1804                               void (*cleanup)(struct flow_block_cb *block_cb))
1805{
1806        struct nfp_flower_indr_block_cb_priv *cb_priv;
1807        struct nfp_flower_priv *priv = app->priv;
1808        struct flow_block_cb *block_cb;
1809
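            /* Regular netdevs must bind the ingress block; internal ports
             * are instead offloaded via their egress block.
             */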
1810        if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1811             !nfp_flower_internal_port_can_offload(app, netdev)) ||
1812            (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
1813             nfp_flower_internal_port_can_offload(app, netdev)))
1814                return -EOPNOTSUPP;
1815
1816        switch (f->command) {
1817        case FLOW_BLOCK_BIND:
1818                cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1819                if (cb_priv &&
1820                    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
1821                                          cb_priv,
1822                                          &nfp_block_cb_list))
1823                        return -EBUSY;
1824
1825                cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
1826                if (!cb_priv)
1827                        return -ENOMEM;
1828
1829                cb_priv->netdev = netdev;
1830                cb_priv->app = app;
1831                list_add(&cb_priv->list, &priv->indr_block_cb_priv);
1832
1833                block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
1834                                                    cb_priv, cb_priv,
1835                                                    nfp_flower_setup_indr_tc_release,
1836                                                    f, netdev, sch, data, app, cleanup);
1837                if (IS_ERR(block_cb)) {
1838                        list_del(&cb_priv->list);
1839                        kfree(cb_priv);
1840                        return PTR_ERR(block_cb);
1841                }
1842
1843                flow_block_cb_add(block_cb, f);
1844                list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1845                return 0;
1846        case FLOW_BLOCK_UNBIND:
1847                cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1848                if (!cb_priv)
1849                        return -ENOENT;
1850
1851                block_cb = flow_block_cb_lookup(f->block,
1852                                                nfp_flower_setup_indr_block_cb,
1853                                                cb_priv);
1854                if (!block_cb)
1855                        return -ENOENT;
1856
1857                flow_indr_block_cb_remove(block_cb, f);
1858                list_del(&block_cb->driver_list);
1859                return 0;
1860        default:
1861                return -EOPNOTSUPP;
1862        }
1864}
1865
1866int
1867nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
1868                            enum tc_setup_type type, void *type_data,
1869                            void *data,
1870                            void (*cleanup)(struct flow_block_cb *block_cb))
1871{
1872        if (!nfp_fl_is_netdev_to_offload(netdev))
1873                return -EOPNOTSUPP;
1874
1875        switch (type) {
1876        case TC_SETUP_BLOCK:
1877                return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
1878                                                      type_data, data, cleanup);
1879        default:
1880                return -EOPNOTSUPP;
1881        }
1882}
1883