dpdk/drivers/net/sfc/sfc_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_flow_rss.h"
#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_switch.h"

struct sfc_flow_ops_by_spec {
        sfc_flow_parse_cb_t     *parse;
        sfc_flow_verify_cb_t    *verify;
        sfc_flow_cleanup_cb_t   *cleanup;
        sfc_flow_insert_cb_t    *insert;
        sfc_flow_remove_cb_t    *remove;
        sfc_flow_query_cb_t     *query;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;
static sfc_flow_cleanup_cb_t sfc_flow_cleanup;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
        .parse = sfc_flow_parse_rte_to_filter,
        .verify = NULL,
        .cleanup = sfc_flow_cleanup,
        .insert = sfc_flow_filter_insert,
        .remove = sfc_flow_filter_remove,
        .query = NULL,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
        .parse = sfc_flow_parse_rte_to_mae,
        .verify = sfc_mae_flow_verify,
        .cleanup = sfc_mae_flow_cleanup,
        .insert = sfc_mae_flow_insert,
        .remove = sfc_mae_flow_remove,
        .query = sfc_mae_flow_query,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        const struct sfc_flow_ops_by_spec *ops = NULL;

        switch (spec->type) {
        case SFC_FLOW_SPEC_FILTER:
                ops = &sfc_flow_ops_filter;
                break;
        case SFC_FLOW_SPEC_MAE:
                ops = &sfc_flow_ops_mae;
                break;
        default:
                SFC_ASSERT(false);
                break;
        }

        return ops;
}

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such a manner
 * that each flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, then multiple
 * filter copies are created to cover all possible values
 * of such a field.
 */
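
/*
 * For example (an illustrative sketch, not an exhaustive description):
 * a rule that matches on EtherType alone does not pin down the destination
 * MAC class, so the driver may need to insert several filter copies, e.g.
 * one with EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and one with
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST, to cover both unicast and multicast
 * destinations (see the struct sfc_flow_copy_flag machinery below).
 */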

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static sfc_flow_item_parse sfc_flow_parse_pppoex;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of the corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
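
/*
 * A minimal sketch of how a copy-flag table entry might tie these pieces
 * together (illustrative only; the actual table is not shown in this
 * excerpt and its values may differ):
 *
 *     {
 *             .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
 *             .vals_count = 2,
 *             .set_vals = sfc_flow_set_unknown_dst_flags,
 *             .spec_check = sfc_flow_check_unknown_dst_flags,
 *     },
 */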

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" pointers for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec" then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask does not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                supp = ((const uint8_t *)supp_mask)[i];

                if (~supp & mask[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */
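
/*
 * For instance (illustrative): for the ETH item below, a destination MAC
 * mask of ff:ff:ff:ff:ff:ff (full) or 01:00:00:00:00:00 (individual/group
 * bit) is accepted, whereas a partial mask such as ff:ff:ff:00:00:00 is
 * rejected with "Bad mask in the ETH pattern item".
 */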

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. An outer frame specification may only comprise
 *   source/destination addresses and the EtherType field.
 *   An inner frame specification may contain the destination address only.
 *   The individual/group mask is supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (rte_is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!rte_is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * ethertype masks are equal to zero in inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!rte_is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}
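
/*
 * Illustrative testpmd command exercising this parser (assumes standard
 * testpmd flow syntax; the MAC address is just an example):
 *
 *   flow create 0 ingress pattern eth dst is 00:53:00:00:00:01 / end \
 *       actions queue index 0 / end
 *
 * This should yield a filter with EFX_FILTER_MATCH_LOC_MAC set.
 */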

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field and the inner EtherType
 *   are supported. The mask cannot be NULL. Ranging is not supported.
 * @param[in,out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                /* Apply mask to keep VID only */
                vid = rte_bswap16(spec->tci & mask->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN TPID matching is not supported");
                return -rte_errno;
        }
        if (mask->inner_type == supp_mask.inner_type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
        } else if (mask->inner_type) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Bad mask for VLAN inner_type");
                return -rte_errno;
        }

        return 0;
}
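
/*
 * Illustrative testpmd command with two VLAN items (QinQ); per the note
 * above, the first item matches the outer tag and the second the inner one:
 *
 *   flow create 0 ingress pattern eth / vlan vid is 100 / vlan vid is 200 \
 *       / end actions queue index 0 / end
 */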

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in,out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}
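
/*
 * Illustrative testpmd command matching an IPv4 destination address:
 *
 *   flow create 0 ingress pattern eth / ipv4 dst is 192.0.2.1 / end \
 *       actions queue index 0 / end
 *
 * This sets EFX_FILTER_MATCH_LOC_HOST and the IPv4 EtherType implicitly.
 */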

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in,out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}
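
/*
 * Illustrative testpmd command matching an IPv6 source address:
 *
 *   flow create 0 ingress pattern eth / ipv6 src is 2001:db8::1 / end \
 *       actions queue index 0 / end
 *
 * This sets EFX_FILTER_MATCH_REM_HOST and the IPv6 EtherType implicitly.
 */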

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}
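
/*
 * Illustrative testpmd command matching a TCP destination port:
 *
 *   flow create 0 ingress pattern eth / ipv4 / tcp dst is 80 / end \
 *       actions queue index 0 / end
 *
 * This sets EFX_FILTER_MATCH_LOC_PORT and IP_PROTO (TCP) implicitly.
 */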

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}
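
/*
 * Illustrative testpmd command matching a UDP source port:
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp src is 53 / end \
 *       actions queue index 0 / end
 *
 * This sets EFX_FILTER_MATCH_REM_PORT and IP_PROTO (UDP) implicitly.
 */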

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}
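
/*
 * For example (illustrative): a pattern "eth / ipv4 proto is 17 / vxlan"
 * is consistent, because VxLAN requires UDP (IP protocol 17) in the outer
 * frame, whereas "eth / ipv4 proto is 6 / vxlan" would be rejected by the
 * check above.
 */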

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}
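
/*
 * Illustrative testpmd command matching a VXLAN VNI and an inner
 * destination MAC:
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 42 \
 *       / eth dst is 00:53:00:00:00:02 / end actions queue index 0 / end
 */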

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type must be Ethernet
 *   (0x6558). If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      struct sfc_flow_parse_ctx *parse_ctx,
                      struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}
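
/*
 * Illustrative testpmd command matching a GENEVE VNI:
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp / geneve vni is 42 \
 *       / end actions queue index 0 / end
 */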

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}
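
/*
 * Illustrative testpmd command matching an NVGRE TNI:
 *
 *   flow create 0 ingress pattern eth / ipv4 / nvgre tni is 7 / end \
 *       actions queue index 0 / end
 */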

/**
 * Convert PPPoEx item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification.
 *   Matching on PPPoEx fields is not supported.
 *   This item can only be used to set or validate the EtherType filter.
 *   Only zero masks are allowed.
 *   Ranging is not supported.
 * @param[in,out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_pppoex(const struct rte_flow_item *item,
                      struct sfc_flow_parse_ctx *parse_ctx,
                      struct rte_flow_error *error)
{
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_pppoe *spec = NULL;
        const struct rte_flow_item_pppoe *mask = NULL;
        const struct rte_flow_item_pppoe supp_mask = {};
        const struct rte_flow_item_pppoe def_mask = {};
        uint16_t ether_type;
        int rc;

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &def_mask,
                                 sizeof(struct rte_flow_item_pppoe),
                                 error);
        if (rc != 0)
                return rc;

        if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
                ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
        else
                ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;

        if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
                if (efx_spec->efs_ether_type != ether_type) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Invalid EtherType for a PPPoE flow item");
                        return -rte_errno;
                }
        } else {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type;
        }

        return 0;
}
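
/*
 * Illustrative testpmd command; the PPPoE session item carries no match
 * fields here and only pins the EtherType to 0x8864:
 *
 *   flow create 0 ingress pattern eth / pppoes / end \
 *       actions queue index 0 / end
 */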

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .name = "VOID",
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .name = "ETH",
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .name = "VLAN",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PPPOED,
                .name = "PPPOED",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_pppoex,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PPPOES,
                .name = "PPPOES",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_pppoex,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .name = "IPV4",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .name = "IPV6",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .name = "TCP",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .name = "UDP",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .name = "VXLAN",
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .name = "GENEVE",
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .name = "NVGRE",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_nvgre,
        },
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
                    const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_flow_spec_mae *spec_mae = &spec->mae;
        struct sfc_mae *mae = &sa->mae;

        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0 && attr->transfer == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0 && attr->transfer == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Ingress is compulsory");
                return -rte_errno;
        }
        if (attr->transfer == 0) {
                if (attr->priority != 0) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Priorities are unsupported");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_FILTER;
                spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
                spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
                spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
        } else {
                if (mae->status != SFC_MAE_STATUS_ADMIN) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                           attr, "Transfer is not supported");
                        return -rte_errno;
                }
                if (attr->priority > mae->nb_action_rule_prios_max) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Unsupported priority level");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_MAE;
                spec_mae->priority = attr->priority;
                spec_mae->match_spec = NULL;
                spec_mae->action_set = NULL;
                spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        return 0;
}
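
/*
 * Illustrative attribute usage (exact capabilities depend on the adapter):
 * "ingress" rules take the VNIC filter path above, while "transfer" rules
 * take the MAE path, e.g. in testpmd:
 *
 *   flow create 0 ingress pattern eth / end actions queue index 0 / end
 *   flow create 0 transfer pattern eth / end actions port_id id 1 / end
 */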
1302
1303/* Get item from array sfc_flow_items */
1304static const struct sfc_flow_item *
1305sfc_flow_get_item(const struct sfc_flow_item *items,
1306                  unsigned int nb_items,
1307                  enum rte_flow_item_type type)
1308{
1309        unsigned int i;
1310
1311        for (i = 0; i < nb_items; i++)
1312                if (items[i].type == type)
1313                        return &items[i];
1314
1315        return NULL;
1316}
1317
1318int
1319sfc_flow_parse_pattern(struct sfc_adapter *sa,
1320                       const struct sfc_flow_item *flow_items,
1321                       unsigned int nb_flow_items,
1322                       const struct rte_flow_item pattern[],
1323                       struct sfc_flow_parse_ctx *parse_ctx,
1324                       struct rte_flow_error *error)
1325{
1326        int rc;
1327        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1328        boolean_t is_ifrm = B_FALSE;
1329        const struct sfc_flow_item *item;
1330
1331        if (pattern == NULL) {
1332                rte_flow_error_set(error, EINVAL,
1333                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1334                                   "NULL pattern");
1335                return -rte_errno;
1336        }
1337
1338        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1339                item = sfc_flow_get_item(flow_items, nb_flow_items,
1340                                         pattern->type);
1341                if (item == NULL) {
1342                        rte_flow_error_set(error, ENOTSUP,
1343                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1344                                           "Unsupported pattern item");
1345                        return -rte_errno;
1346                }
1347
1348                /*
1349                 * Omitting one or several protocol layers at the beginning
1350                 * of pattern is supported
1351                 */
1352                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1353                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1354                    item->prev_layer != prev_layer) {
1355                        rte_flow_error_set(error, ENOTSUP,
1356                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1357                                           "Unexpected sequence of pattern items");
1358                        return -rte_errno;
1359                }
1360
1361                /*
1362                 * Allow only VOID and ETH pattern items in the inner frame.
1363                 * Also check that there is only one tunneling protocol.
1364                 */
1365                switch (item->type) {
1366                case RTE_FLOW_ITEM_TYPE_VOID:
1367                case RTE_FLOW_ITEM_TYPE_ETH:
1368                        break;
1369
1370                case RTE_FLOW_ITEM_TYPE_VXLAN:
1371                case RTE_FLOW_ITEM_TYPE_GENEVE:
1372                case RTE_FLOW_ITEM_TYPE_NVGRE:
1373                        if (is_ifrm) {
1374                                rte_flow_error_set(error, EINVAL,
1375                                        RTE_FLOW_ERROR_TYPE_ITEM,
1376                                        pattern,
1377                                        "More than one tunneling protocol");
1378                                return -rte_errno;
1379                        }
1380                        is_ifrm = B_TRUE;
1381                        break;
1382
1383                default:
1384                        if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
1385                            is_ifrm) {
1386                                rte_flow_error_set(error, EINVAL,
1387                                        RTE_FLOW_ERROR_TYPE_ITEM,
1388                                        pattern,
1389                                        "There is an unsupported pattern item "
1390                                        "in the inner frame");
1391                                return -rte_errno;
1392                        }
1393                        break;
1394                }
1395
1396                if (parse_ctx->type != item->ctx_type) {
1397                        rte_flow_error_set(error, EINVAL,
1398                                        RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1399                                        "Parse context type mismatch");
1400                        return -rte_errno;
1401                }
1402
1403                rc = item->parse(pattern, parse_ctx, error);
1404                if (rc != 0) {
1405                        sfc_err(sa, "failed to parse item %s: %s",
1406                                item->name, strerror(-rc));
1407                        return rc;
1408                }
1409
1410                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1411                        prev_layer = item->layer;
1412        }
1413
1414        return 0;
1415}
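
/*
 * Illustrative example (not part of the driver): a pattern accepted by
 * sfc_flow_parse_pattern(). Leading protocol layers may be omitted, and
 * the ordering of the remaining items is validated via the
 * prev_layer/layer bookkeeping above:
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * A tunnel item (VXLAN, GENEVE or NVGRE) may appear at most once, and for
 * filter-based parse contexts only VOID and ETH items may follow it.
 */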
1416
1417static int
1418sfc_flow_parse_queue(struct sfc_adapter *sa,
1419                     const struct rte_flow_action_queue *queue,
1420                     struct rte_flow *flow)
1421{
1422        struct sfc_flow_spec *spec = &flow->spec;
1423        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1424        struct sfc_rxq *rxq;
1425        struct sfc_rxq_info *rxq_info;
1426
1427        if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
1428                return -EINVAL;
1429
1430        rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
1431        spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1432
1433        rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
1434
1435        if ((rxq_info->rxq_flags & SFC_RXQ_FLAG_RSS_HASH) != 0) {
1436                struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1437                struct sfc_rss *ethdev_rss = &sas->rss;
1438
1439                spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1440                spec_filter->rss_ctx = &ethdev_rss->dummy_ctx;
1441        }
1442
1443        return 0;
1444}
1445
1446static int
1447sfc_flow_parse_rss(struct sfc_adapter *sa,
1448                   const struct rte_flow_action_rss *action_rss,
1449                   struct rte_flow *flow)
1450{
1451        struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1452        struct sfc_flow_rss_conf conf;
1453        uint16_t sw_qid_min;
1454        struct sfc_rxq *rxq;
1455        int rc;
1456
1457        spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1458
1459        rc = sfc_flow_rss_parse_conf(sa, action_rss, &conf, &sw_qid_min);
1460        if (rc != 0)
1461                return -rc;
1462
1463        rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, sw_qid_min);
1464        spec_filter->template.efs_dmaq_id = rxq->hw_index;
1465
1466        spec_filter->rss_ctx = sfc_flow_rss_ctx_reuse(sa, &conf, sw_qid_min,
1467                                                      action_rss->queue);
1468        if (spec_filter->rss_ctx != NULL)
1469                return 0;
1470
1471        rc = sfc_flow_rss_ctx_add(sa, &conf, sw_qid_min, action_rss->queue,
1472                                  &spec_filter->rss_ctx);
1473        if (rc != 0)
1474                return -rc;
1475
1476        return 0;
1477}
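
/*
 * Illustrative example (not part of the driver): an RSS action that this
 * parser consumes, spreading traffic across ethdev Rx queues 0..3 with
 * IP hashing and the default key:
 *
 *	static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
 *	const struct rte_flow_action_rss rss = {
 *		.types = RTE_ETH_RSS_IP,
 *		.queue_num = RTE_DIM(rss_queues),
 *		.queue = rss_queues,
 *	};
 *
 * sfc_flow_rss_parse_conf() validates the queue set and hash settings;
 * an already programmed RSS context is reused if the configuration and
 * queue span match (sfc_flow_rss_ctx_reuse() above).
 */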
1478
1479static int
1480sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1481                    unsigned int filters_count)
1482{
1483        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1484        unsigned int i;
1485        int ret = 0;
1486
1487        for (i = 0; i < filters_count; i++) {
1488                int rc;
1489
1490                rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1491                if (ret == 0 && rc != 0) {
1492                        sfc_err(sa, "failed to remove filter specification "
1493                                "(rc = %d)", rc);
1494                        ret = rc;
1495                }
1496        }
1497
1498        return ret;
1499}
1500
1501static int
1502sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1503{
1504        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1505        unsigned int i;
1506        int rc = 0;
1507
1508        for (i = 0; i < spec_filter->count; i++) {
1509                rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1510                if (rc != 0) {
1511                        sfc_flow_spec_flush(sa, spec, i);
1512                        break;
1513                }
1514        }
1515
1516        return rc;
1517}
1518
1519static int
1520sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1521{
1522        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1523
1524        return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1525}
1526
1527static int
1528sfc_flow_filter_insert(struct sfc_adapter *sa,
1529                       struct rte_flow *flow)
1530{
1531        struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1532        struct sfc_flow_rss_ctx *rss_ctx = spec_filter->rss_ctx;
1533        int rc = 0;
1534
1535        rc = sfc_flow_rss_ctx_program(sa, rss_ctx);
1536        if (rc != 0)
1537                goto fail_rss_ctx_program;
1538
1539        if (rss_ctx != NULL) {
1540                unsigned int i;
1541
1542                /*
1543                 * At this point, fully elaborated filter specifications
1544                 * have been produced from the template. To make sure that
1545                 * RSS behaviour is consistent between them, set the same
1546                 * RSS context value everywhere.
1547                 */
1548                for (i = 0; i < spec_filter->count; i++) {
1549                        efx_filter_spec_t *spec = &spec_filter->filters[i];
1550
1551                        spec->efs_rss_context = rss_ctx->nic_handle;
1552                }
1553        }
1554
1555        rc = sfc_flow_spec_insert(sa, &flow->spec);
1556        if (rc != 0)
1557                goto fail_filter_insert;
1558
1559        return 0;
1560
1561fail_filter_insert:
1562        sfc_flow_rss_ctx_terminate(sa, rss_ctx);
1563
1564fail_rss_ctx_program:
1565        return rc;
1566}
1567
1568static int
1569sfc_flow_filter_remove(struct sfc_adapter *sa,
1570                       struct rte_flow *flow)
1571{
1572        struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1573        int rc = 0;
1574
1575        rc = sfc_flow_spec_remove(sa, &flow->spec);
1576        if (rc != 0)
1577                return rc;
1578
1579        sfc_flow_rss_ctx_terminate(sa, spec_filter->rss_ctx);
1580
1581        return 0;
1582}
1583
1584static int
1585sfc_flow_parse_mark(struct sfc_adapter *sa,
1586                    const struct rte_flow_action_mark *mark,
1587                    struct rte_flow *flow)
1588{
1589        struct sfc_flow_spec *spec = &flow->spec;
1590        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1591        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1592        uint32_t mark_max;
1593
1594        mark_max = encp->enc_filter_action_mark_max;
1595        if (sfc_ft_is_active(sa))
1596                mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);
1597
1598        if (mark == NULL || mark->id > mark_max)
1599                return EINVAL;
1600
1601        spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1602        spec_filter->template.efs_mark = mark->id;
1603
1604        return 0;
1605}
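
/*
 * Note: when flow tunnel offload is active, the valid MARK range is the
 * intersection of the hardware limit (enc_filter_action_mark_max) and
 * SFC_FT_USER_MARK_MASK, which bounds the user-visible part of the mark
 * space.
 */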
1606
1607static int
1608sfc_flow_parse_actions(struct sfc_adapter *sa,
1609                       const struct rte_flow_action actions[],
1610                       struct rte_flow *flow,
1611                       struct rte_flow_error *error)
1612{
1613        int rc;
1614        struct sfc_flow_spec *spec = &flow->spec;
1615        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1616        const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1617        const uint64_t rx_metadata = sa->negotiated_rx_metadata;
1618        uint32_t actions_set = 0;
1619        const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1620                                           (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1621                                           (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1622        const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1623                                           (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1624
1625        if (actions == NULL) {
1626                rte_flow_error_set(error, EINVAL,
1627                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1628                                   "NULL actions");
1629                return -rte_errno;
1630        }
1631
1632        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1633                switch (actions->type) {
1634                case RTE_FLOW_ACTION_TYPE_VOID:
1635                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1636                                               actions_set);
1637                        break;
1638
1639                case RTE_FLOW_ACTION_TYPE_QUEUE:
1640                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1641                                               actions_set);
1642                        if ((actions_set & fate_actions_mask) != 0)
1643                                goto fail_fate_actions;
1644
1645                        rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1646                        if (rc != 0) {
1647                                rte_flow_error_set(error, EINVAL,
1648                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
1649                                        "Bad QUEUE action");
1650                                return -rte_errno;
1651                        }
1652                        break;
1653
1654                case RTE_FLOW_ACTION_TYPE_RSS:
1655                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1656                                               actions_set);
1657                        if ((actions_set & fate_actions_mask) != 0)
1658                                goto fail_fate_actions;
1659
1660                        rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1661                        if (rc != 0) {
1662                                rte_flow_error_set(error, -rc,
1663                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
1664                                        "Bad RSS action");
1665                                return -rte_errno;
1666                        }
1667                        break;
1668
1669                case RTE_FLOW_ACTION_TYPE_DROP:
1670                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1671                                               actions_set);
1672                        if ((actions_set & fate_actions_mask) != 0)
1673                                goto fail_fate_actions;
1674
1675                        spec_filter->template.efs_dmaq_id =
1676                                EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1677                        break;
1678
1679                case RTE_FLOW_ACTION_TYPE_FLAG:
1680                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1681                                               actions_set);
1682                        if ((actions_set & mark_actions_mask) != 0)
1683                                goto fail_actions_overlap;
1684
1685                        if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1686                                rte_flow_error_set(error, ENOTSUP,
1687                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1688                                        "FLAG action is not supported on the current Rx datapath");
1689                                return -rte_errno;
1690                        } else if ((rx_metadata &
1691                                    RTE_ETH_RX_METADATA_USER_FLAG) == 0) {
1692                                rte_flow_error_set(error, ENOTSUP,
1693                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1694                                        "flag delivery has not been negotiated");
1695                                return -rte_errno;
1696                        }
1697
1698                        spec_filter->template.efs_flags |=
1699                                EFX_FILTER_FLAG_ACTION_FLAG;
1700                        break;
1701
1702                case RTE_FLOW_ACTION_TYPE_MARK:
1703                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1704                                               actions_set);
1705                        if ((actions_set & mark_actions_mask) != 0)
1706                                goto fail_actions_overlap;
1707
1708                        if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1709                                rte_flow_error_set(error, ENOTSUP,
1710                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1711                                        "MARK action is not supported on the current Rx datapath");
1712                                return -rte_errno;
1713                        } else if ((rx_metadata &
1714                                    RTE_ETH_RX_METADATA_USER_MARK) == 0) {
1715                                rte_flow_error_set(error, ENOTSUP,
1716                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1717                                        "mark delivery has not been negotiated");
1718                                return -rte_errno;
1719                        }
1720
1721                        rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1722                        if (rc != 0) {
1723                                rte_flow_error_set(error, rc,
1724                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
1725                                        "Bad MARK action");
1726                                return -rte_errno;
1727                        }
1728                        break;
1729
1730                default:
1731                        rte_flow_error_set(error, ENOTSUP,
1732                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
1733                                           "Action is not supported");
1734                        return -rte_errno;
1735                }
1736
1737                actions_set |= (1UL << actions->type);
1738        }
1739
1740        /* When fate is unknown, drop traffic. */
1741        if ((actions_set & fate_actions_mask) == 0) {
1742                spec_filter->template.efs_dmaq_id =
1743                        EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1744        }
1745
1746        return 0;
1747
1748fail_fate_actions:
1749        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1750                           "Cannot combine several fate-deciding actions, "
1751                           "choose between QUEUE, RSS or DROP");
1752        return -rte_errno;
1753
1754fail_actions_overlap:
1755        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1756                           "Overlapping actions are not supported");
1757        return -rte_errno;
1758}
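
/*
 * Illustrative example (not part of the driver): an action list accepted
 * by sfc_flow_parse_actions(). At most one fate action (QUEUE, RSS or
 * DROP) is allowed, MARK and FLAG are mutually exclusive, and a rule
 * with no fate action drops the matching traffic:
 *
 *	struct rte_flow_action_mark mark = { .id = 42 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * MARK additionally requires Rx datapath support and negotiated mark
 * delivery (see the checks above).
 */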
1759
1760/**
1761 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1762 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1763 * specifications after copying.
1764 *
1765 * @param spec[in, out]
1766 *   SFC flow specification to update.
1767 * @param filters_count_for_one_val[in]
1768 *   How many specifications should have the same match flag; this equals
1769 *   the number of specifications before copying.
1770 * @param error[out]
1771 *   Perform verbose error reporting if not NULL.
1772 */
1773static int
1774sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1775                               unsigned int filters_count_for_one_val,
1776                               struct rte_flow_error *error)
1777{
1778        unsigned int i;
1779        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1780        static const efx_filter_match_flags_t vals[] = {
1781                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1782                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1783        };
1784
1785        if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1786                rte_flow_error_set(error, EINVAL,
1787                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1788                        "Number of specifications is incorrect while copying "
1789                        "by unknown destination flags");
1790                return -rte_errno;
1791        }
1792
1793        for (i = 0; i < spec_filter->count; i++) {
1794                /* The check above ensures that divisor can't be zero here */
1795                spec_filter->filters[i].efs_match_flags |=
1796                        vals[i / filters_count_for_one_val];
1797        }
1798
1799        return 0;
1800}
1801
1802/**
1803 * Check that the following condition is met:
1804 * - the list of supported filters has a filter
1805 *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1806 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1807 *   be inserted.
1808 *
1809 * @param match[in]
1810 *   The match flags of filter.
1811 * @param spec[in]
1812 *   Specification to be supplemented.
1813 * @param filter[in]
1814 *   SFC filter with list of supported filters.
1815 */
1816static boolean_t
1817sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1818                                 __rte_unused efx_filter_spec_t *spec,
1819                                 struct sfc_filter *filter)
1820{
1821        unsigned int i;
1822        efx_filter_match_flags_t match_mcast_dst;
1823
1824        match_mcast_dst =
1825                (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1826                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1827        for (i = 0; i < filter->supported_match_num; i++) {
1828                if (match_mcast_dst == filter->supported_match[i])
1829                        return B_TRUE;
1830        }
1831
1832        return B_FALSE;
1833}
1834
1835/**
1836 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1837 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1838 * specifications after copying.
1839 *
1840 * @param spec[in, out]
1841 *   SFC flow specification to update.
1842 * @param filters_count_for_one_val[in]
1843 *   How many specifications should have the same EtherType value; this
1844 *   equals the number of specifications before copying.
1845 * @param error[out]
1846 *   Perform verbose error reporting if not NULL.
1847 */
1848static int
1849sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1850                        unsigned int filters_count_for_one_val,
1851                        struct rte_flow_error *error)
1852{
1853        unsigned int i;
1854        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1855        static const uint16_t vals[] = {
1856                EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1857        };
1858
1859        if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1860                rte_flow_error_set(error, EINVAL,
1861                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1862                        "Number of specifications is incorrect "
1863                        "while copying by Ethertype");
1864                return -rte_errno;
1865        }
1866
1867        for (i = 0; i < spec_filter->count; i++) {
1868                spec_filter->filters[i].efs_match_flags |=
1869                        EFX_FILTER_MATCH_ETHER_TYPE;
1870
1871                /*
1872                 * The check above ensures that
1873                 * filters_count_for_one_val is not 0
1874                 */
1875                spec_filter->filters[i].efs_ether_type =
1876                        vals[i / filters_count_for_one_val];
1877        }
1878
1879        return 0;
1880}
1881
1882/**
1883 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1884 * in the same specifications after copying.
1885 *
1886 * @param spec[in, out]
1887 *   SFC flow specification to update.
1888 * @param filters_count_for_one_val[in]
1889 *   How many specifications should have the same match flag; this equals
1890 *   the number of specifications before copying.
1891 * @param error[out]
1892 *   Perform verbose error reporting if not NULL.
1893 */
1894static int
1895sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1896                            unsigned int filters_count_for_one_val,
1897                            struct rte_flow_error *error)
1898{
1899        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1900        unsigned int i;
1901
1902        if (filters_count_for_one_val != spec_filter->count) {
1903                rte_flow_error_set(error, EINVAL,
1904                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1905                        "Number of specifications is incorrect "
1906                        "while copying by outer VLAN ID");
1907                return -rte_errno;
1908        }
1909
1910        for (i = 0; i < spec_filter->count; i++) {
1911                spec_filter->filters[i].efs_match_flags |=
1912                        EFX_FILTER_MATCH_OUTER_VID;
1913
1914                spec_filter->filters[i].efs_outer_vid = 0;
1915        }
1916
1917        return 0;
1918}
1919
1920/**
1921 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1922 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1923 * specifications after copying.
1924 *
1925 * @param spec[in, out]
1926 *   SFC flow specification to update.
1927 * @param filters_count_for_one_val[in]
1928 *   How many specifications should have the same match flag; this equals
1929 *   the number of specifications before copying.
1930 * @param error[out]
1931 *   Perform verbose error reporting if not NULL.
1932 */
1933static int
1934sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1935                                    unsigned int filters_count_for_one_val,
1936                                    struct rte_flow_error *error)
1937{
1938        unsigned int i;
1939        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1940        static const efx_filter_match_flags_t vals[] = {
1941                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1942                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1943        };
1944
1945        if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1946                rte_flow_error_set(error, EINVAL,
1947                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1948                        "Number of specifications is incorrect while copying "
1949                        "by inner frame unknown destination flags");
1950                return -rte_errno;
1951        }
1952
1953        for (i = 0; i < spec_filter->count; i++) {
1954                /* The check above ensures that divisor can't be zero here */
1955                spec_filter->filters[i].efs_match_flags |=
1956                        vals[i / filters_count_for_one_val];
1957        }
1958
1959        return 0;
1960}
1961
1962/**
1963 * Check that the following conditions are met:
1964 * - the specification corresponds to a filter for encapsulated traffic
1965 * - the list of supported filters has a filter
1966 *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1967 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1968 *   be inserted.
1969 *
1970 * @param match[in]
1971 *   The match flags of filter.
1972 * @param spec[in]
1973 *   Specification to be supplemented.
1974 * @param filter[in]
1975 *   SFC filter with list of supported filters.
1976 */
1977static boolean_t
1978sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1979                                      efx_filter_spec_t *spec,
1980                                      struct sfc_filter *filter)
1981{
1982        unsigned int i;
1983        efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1984        efx_filter_match_flags_t match_mcast_dst;
1985
1986        if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1987                return B_FALSE;
1988
1989        match_mcast_dst =
1990                (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1991                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1992        for (i = 0; i < filter->supported_match_num; i++) {
1993                if (match_mcast_dst == filter->supported_match[i])
1994                        return B_TRUE;
1995        }
1996
1997        return B_FALSE;
1998}
1999
2000/**
2001 * Check whether the list of supported filters has a filter that differs
2002 * from @p match only in that it lacks the EFX_FILTER_MATCH_OUTER_VID flag.
2003 * If such a filter exists, it will be used instead, so the
2004 * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
2005 *
2006 * @param match[in]
2007 *   The match flags of filter.
2008 * @param spec[in]
2009 *   Specification to be supplemented.
2010 * @param filter[in]
2011 *   SFC filter with list of supported filters.
2012 */
2013static boolean_t
2014sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2015                              __rte_unused efx_filter_spec_t *spec,
2016                              struct sfc_filter *filter)
2017{
2018        unsigned int i;
2019        efx_filter_match_flags_t match_without_vid =
2020                match & ~EFX_FILTER_MATCH_OUTER_VID;
2021
2022        for (i = 0; i < filter->supported_match_num; i++) {
2023                if (match_without_vid == filter->supported_match[i])
2024                        return B_FALSE;
2025        }
2026
2027        return B_TRUE;
2028}
2029
2030/*
2031 * Match flags that can be automatically added to filters.
2032 * The search in sfc_flow_spec_filters_complete() keeps the last candidate
2033 * attaining the minimum multiplier, which gives EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
2034 * a higher priority than EFX_FILTER_MATCH_ETHER_TYPE, since filters with
2035 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are at the end of the list of
2036 * supported filters.
2037 */
2038static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2039        {
2040                .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2041                .vals_count = 2,
2042                .set_vals = sfc_flow_set_unknown_dst_flags,
2043                .spec_check = sfc_flow_check_unknown_dst_flags,
2044        },
2045        {
2046                .flag = EFX_FILTER_MATCH_ETHER_TYPE,
2047                .vals_count = 2,
2048                .set_vals = sfc_flow_set_ethertypes,
2049                .spec_check = NULL,
2050        },
2051        {
2052                .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2053                .vals_count = 2,
2054                .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2055                .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2056        },
2057        {
2058                .flag = EFX_FILTER_MATCH_OUTER_VID,
2059                .vals_count = 1,
2060                .set_vals = sfc_flow_set_outer_vid_flag,
2061                .spec_check = sfc_flow_check_outer_vid_flag,
2062        },
2063};
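
/*
 * Note: applying all of the copy flags above multiplies the number of
 * specifications by the product of their vals_count values
 * (2 * 2 * 2 * 1 = 8); this worst-case expansion of a single flow rule
 * is checked against SF_FLOW_SPEC_NB_FILTERS_MAX in
 * sfc_flow_spec_add_match_flag().
 */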
2064
2065/* Get item from array sfc_flow_copy_flags */
2066static const struct sfc_flow_copy_flag *
2067sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2068{
2069        unsigned int i;
2070
2071        for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2072                if (sfc_flow_copy_flags[i].flag == flag)
2073                        return &sfc_flow_copy_flags[i];
2074        }
2075
2076        return NULL;
2077}
2078
2079/**
2080 * Make copies of the specifications, set match flag and values
2081 * of the field that corresponds to it.
2082 *
2083 * @param spec[in, out]
2084 *   SFC flow specification to update.
2085 * @param flag[in]
2086 *   The match flag to add.
2087 * @param error[out]
2088 *   Perform verbose error reporting if not NULL.
2089 */
2090static int
2091sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2092                             efx_filter_match_flags_t flag,
2093                             struct rte_flow_error *error)
2094{
2095        unsigned int i;
2096        unsigned int new_filters_count;
2097        unsigned int filters_count_for_one_val;
2098        const struct sfc_flow_copy_flag *copy_flag;
2099        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2100        int rc;
2101
2102        copy_flag = sfc_flow_get_copy_flag(flag);
2103        if (copy_flag == NULL) {
2104                rte_flow_error_set(error, ENOTSUP,
2105                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2106                                   "Unsupported spec field for copying");
2107                return -rte_errno;
2108        }
2109
2110        new_filters_count = spec_filter->count * copy_flag->vals_count;
2111        if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2112                rte_flow_error_set(error, EINVAL,
2113                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2114                        "Too many EFX specifications in the flow rule");
2115                return -rte_errno;
2116        }
2117
2118        /* Copy filters specifications */
2119        for (i = spec_filter->count; i < new_filters_count; i++) {
2120                spec_filter->filters[i] =
2121                        spec_filter->filters[i - spec_filter->count];
2122        }
2123
2124        filters_count_for_one_val = spec_filter->count;
2125        spec_filter->count = new_filters_count;
2126
2127        rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2128        if (rc != 0)
2129                return rc;
2130
2131        return 0;
2132}
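
/*
 * Worked example: with two specifications [A, B] and the
 * EFX_FILTER_MATCH_ETHER_TYPE copy flag (vals_count = 2), the array is
 * first doubled to [A, B, A, B]; sfc_flow_set_ethertypes() then assigns
 * EFX_ETHER_TYPE_IPV4 to the first pair and EFX_ETHER_TYPE_IPV6 to the
 * second, yielding [A+IPv4, B+IPv4, A+IPv6, B+IPv6].
 */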
2133
2134/**
2135 * Check that the given set of match flags missing in the original filter spec
2136 * could be covered by adding spec copies which specify the corresponding
2137 * flags and packet field values to match.
2138 *
2139 * @param miss_flags[in]
2140 *   Flags present in the supported filter but missing from the spec.
2141 * @param spec[in]
2142 *   Specification to be supplemented.
2143 * @param filter[in]
2144 *   SFC filter.
2145 *
2146 * @return
2147 *   Number of specifications after copy or 0, if the flags can not be added.
2148 */
2149static unsigned int
2150sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2151                             efx_filter_spec_t *spec,
2152                             struct sfc_filter *filter)
2153{
2154        unsigned int i;
2155        efx_filter_match_flags_t copy_flags = 0;
2156        efx_filter_match_flags_t flag;
2157        efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2158        sfc_flow_spec_check *check;
2159        unsigned int multiplier = 1;
2160
2161        for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2162                flag = sfc_flow_copy_flags[i].flag;
2163                check = sfc_flow_copy_flags[i].spec_check;
2164                if ((flag & miss_flags) == flag) {
2165                        if (check != NULL && (!check(match, spec, filter)))
2166                                continue;
2167
2168                        copy_flags |= flag;
2169                        multiplier *= sfc_flow_copy_flags[i].vals_count;
2170                }
2171        }
2172
2173        if (copy_flags == miss_flags)
2174                return multiplier;
2175
2176        return 0;
2177}
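
/*
 * Example: if miss_flags is EFX_FILTER_MATCH_ETHER_TYPE |
 * EFX_FILTER_MATCH_OUTER_VID and both flags are applicable (spec_check
 * is NULL or passes), the function returns 2 * 1 = 2, i.e. covering the
 * missing flags doubles the number of specifications.
 */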
2178
2179/**
2180 * Attempt to supplement the specification template to the minimally
2181 * supported set of match flags. To do this, it is necessary to copy
2182 * the specifications, filling them with the values of fields that
2183 * correspond to the missing flags.
2184 * The necessary and sufficient filter is built from the fewest number
2185 * of copies which could be made to cover the minimally required set
2186 * of flags.
2187 *
2188 * @param sa[in]
2189 *   SFC adapter.
2190 * @param spec[in, out]
2191 *   SFC flow specification to update.
2192 * @param error[out]
2193 *   Perform verbose error reporting if not NULL.
2194 */
2195static int
2196sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2197                               struct sfc_flow_spec *spec,
2198                               struct rte_flow_error *error)
2199{
2200        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2201        struct sfc_filter *filter = &sa->filter;
2202        efx_filter_match_flags_t miss_flags;
2203        efx_filter_match_flags_t min_miss_flags = 0;
2204        efx_filter_match_flags_t match;
2205        unsigned int min_multiplier = UINT_MAX;
2206        unsigned int multiplier;
2207        unsigned int i;
2208        int rc;
2209
2210        match = spec_filter->template.efs_match_flags;
2211        for (i = 0; i < filter->supported_match_num; i++) {
2212                if ((match & filter->supported_match[i]) == match) {
2213                        miss_flags = filter->supported_match[i] & (~match);
2214                        multiplier = sfc_flow_check_missing_flags(miss_flags,
2215                                &spec_filter->template, filter);
2216                        if (multiplier > 0) {
2217                                if (multiplier <= min_multiplier) {
2218                                        min_multiplier = multiplier;
2219                                        min_miss_flags = miss_flags;
2220                                }
2221                        }
2222                }
2223        }
2224
2225        if (min_multiplier == UINT_MAX) {
2226                rte_flow_error_set(error, ENOTSUP,
2227                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2228                                   "The flow rule pattern is unsupported");
2229                return -rte_errno;
2230        }
2231
2232        for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2233                efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2234
2235                if ((flag & min_miss_flags) == flag) {
2236                        rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2237                        if (rc != 0)
2238                                return rc;
2239                }
2240        }
2241
2242        return 0;
2243}
2244
2245/**
2246 * Check that the set of match flags is referred to by a filter. The
2247 * filter is described by match flags with the ability to add OUTER_VID
2248 * and INNER_VID flags.
2249 *
2250 * @param match_flags[in]
2251 *   Set of match flags.
2252 * @param flags_pattern[in]
2253 *   Pattern of filter match flags.
2254 */
2255static boolean_t
2256sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2257                            efx_filter_match_flags_t flags_pattern)
2258{
2259        if ((match_flags & flags_pattern) != flags_pattern)
2260                return B_FALSE;
2261
2262        switch (match_flags & ~flags_pattern) {
2263        case 0:
2264        case EFX_FILTER_MATCH_OUTER_VID:
2265        case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2266                return B_TRUE;
2267        default:
2268                return B_FALSE;
2269        }
2270}
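
/*
 * For instance, a flags_pattern of EFX_FILTER_MATCH_ETHER_TYPE is matched
 * by ETHER_TYPE, ETHER_TYPE | OUTER_VID and
 * ETHER_TYPE | OUTER_VID | INNER_VID, but not by ETHER_TYPE | LOC_MAC.
 */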
2271
2272/**
2273 * Check whether the spec maps to a hardware filter which is known to be
2274 * ineffective despite being valid.
2275 *
2276 * @param filter[in]
2277 *   SFC filter with list of supported filters.
2278 * @param spec[in]
2279 *   SFC flow specification.
2280 */
2281static boolean_t
2282sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2283                                  struct sfc_flow_spec *spec)
2284{
2285        unsigned int i;
2286        uint16_t ether_type;
2287        uint8_t ip_proto;
2288        efx_filter_match_flags_t match_flags;
2289        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2290
2291        for (i = 0; i < spec_filter->count; i++) {
2292                match_flags = spec_filter->filters[i].efs_match_flags;
2293
2294                if (sfc_flow_is_match_with_vids(match_flags,
2295                                                EFX_FILTER_MATCH_ETHER_TYPE) ||
2296                    sfc_flow_is_match_with_vids(match_flags,
2297                                                EFX_FILTER_MATCH_ETHER_TYPE |
2298                                                EFX_FILTER_MATCH_LOC_MAC)) {
2299                        ether_type = spec_filter->filters[i].efs_ether_type;
2300                        if (filter->supports_ip_proto_or_addr_filter &&
2301                            (ether_type == EFX_ETHER_TYPE_IPV4 ||
2302                             ether_type == EFX_ETHER_TYPE_IPV6))
2303                                return B_TRUE;
2304                } else if (sfc_flow_is_match_with_vids(match_flags,
2305                                EFX_FILTER_MATCH_ETHER_TYPE |
2306                                EFX_FILTER_MATCH_IP_PROTO) ||
2307                           sfc_flow_is_match_with_vids(match_flags,
2308                                EFX_FILTER_MATCH_ETHER_TYPE |
2309                                EFX_FILTER_MATCH_IP_PROTO |
2310                                EFX_FILTER_MATCH_LOC_MAC)) {
2311                        ip_proto = spec_filter->filters[i].efs_ip_proto;
2312                        if (filter->supports_rem_or_local_port_filter &&
2313                            (ip_proto == EFX_IPPROTO_TCP ||
2314                             ip_proto == EFX_IPPROTO_UDP))
2315                                return B_TRUE;
2316                }
2317        }
2318
2319        return B_FALSE;
2320}
2321
2322static int
2323sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2324                              struct rte_flow *flow,
2325                              struct rte_flow_error *error)
2326{
2327        struct sfc_flow_spec *spec = &flow->spec;
2328        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2329        efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2330        efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2331        int rc;
2332
2333        /* Initialize the first filter spec with template */
2334        spec_filter->filters[0] = *spec_tmpl;
2335        spec_filter->count = 1;
2336
2337        if (!sfc_filter_is_match_supported(sa, match_flags)) {
2338                rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2339                if (rc != 0)
2340                        return rc;
2341        }
2342
2343        if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2344                rte_flow_error_set(error, ENOTSUP,
2345                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2346                        "The flow rule pattern is unsupported");
2347                return -rte_errno;
2348        }
2349
2350        return 0;
2351}
2352
2353static int
2354sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2355                             const struct rte_flow_item pattern[],
2356                             const struct rte_flow_action actions[],
2357                             struct rte_flow *flow,
2358                             struct rte_flow_error *error)
2359{
2360        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2361        struct sfc_flow_spec *spec = &flow->spec;
2362        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2363        struct sfc_flow_parse_ctx ctx;
2364        int rc;
2365
2366        ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2367        ctx.filter = &spec_filter->template;
2368
2369        rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2370                                    pattern, &ctx, error);
2371        if (rc != 0)
2372                goto fail_bad_value;
2373
2374        rc = sfc_flow_parse_actions(sa, actions, flow, error);
2375        if (rc != 0)
2376                goto fail_bad_value;
2377
2378        rc = sfc_flow_validate_match_flags(sa, flow, error);
2379        if (rc != 0)
2380                goto fail_bad_value;
2381
2382        return 0;
2383
2384fail_bad_value:
2385        return rc;
2386}
2387
2388static int
2389sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2390                          const struct rte_flow_item pattern[],
2391                          const struct rte_flow_action actions[],
2392                          struct rte_flow *flow,
2393                          struct rte_flow_error *error)
2394{
2395        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2396        struct sfc_flow_spec *spec = &flow->spec;
2397        struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2398        int rc;
2399
2400        /*
2401         * If the flow is meant to be a TUNNEL rule in a FT context,
2402         * preparse its actions and save its properties in spec_mae.
2403         */
2404        rc = sfc_ft_tunnel_rule_detect(sa, actions, spec_mae, error);
2405        if (rc != 0)
2406                goto fail;
2407
2408        rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
2409        if (rc != 0)
2410                goto fail;
2411
2412        if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
2413                /*
2414                 * By design, this flow should be represented solely by the
2415                 * outer rule. But the HW/FW does not yet support setting the
2416                 * Rx mark from RECIRC_ID on outer rule lookup. Neither
2417                 * does it support outer rule counters. As a workaround, an
2418                 * action rule of lower priority is used to do the job.
2419                 *
2420                 * So don't skip sfc_mae_rule_parse_actions() below.
2421                 */
2422        }
2423
2424        rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
2425        if (rc != 0)
2426                goto fail;
2427
2428        if (spec_mae->ft_ctx != NULL) {
2429                if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL)
2430                        spec_mae->ft_ctx->tunnel_rule_is_set = B_TRUE;
2431
2432                ++(spec_mae->ft_ctx->refcnt);
2433        }
2434
2435        return 0;
2436
2437fail:
2438        /* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
2439        spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
2440        spec_mae->ft_ctx = NULL;
2441
2442        return rc;
2443}
2444
2445static int
2446sfc_flow_parse(struct rte_eth_dev *dev,
2447               const struct rte_flow_attr *attr,
2448               const struct rte_flow_item pattern[],
2449               const struct rte_flow_action actions[],
2450               struct rte_flow *flow,
2451               struct rte_flow_error *error)
2452{
2453        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2454        const struct sfc_flow_ops_by_spec *ops;
2455        int rc;
2456
2457        rc = sfc_flow_parse_attr(sa, attr, flow, error);
2458        if (rc != 0)
2459                return rc;
2460
2461        ops = sfc_flow_get_ops_by_spec(flow);
2462        if (ops == NULL || ops->parse == NULL) {
2463                rte_flow_error_set(error, ENOTSUP,
2464                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2465                                   "No backend to handle this flow");
2466                return -rte_errno;
2467        }
2468
2469        return ops->parse(dev, pattern, actions, flow, error);
2470}
2471
2472static struct rte_flow *
2473sfc_flow_zmalloc(struct rte_flow_error *error)
2474{
2475        struct rte_flow *flow;
2476
2477        flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2478        if (flow == NULL) {
2479                rte_flow_error_set(error, ENOMEM,
2480                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2481                                   "Failed to allocate memory");
2482        }
2483
2484        return flow;
2485}
2486
2487static void
2488sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2489{
2490        const struct sfc_flow_ops_by_spec *ops;
2491
2492        ops = sfc_flow_get_ops_by_spec(flow);
2493        if (ops != NULL && ops->cleanup != NULL)
2494                ops->cleanup(sa, flow);
2495
2496        rte_free(flow);
2497}
2498
2499static int
2500sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2501                struct rte_flow_error *error)
2502{
2503        const struct sfc_flow_ops_by_spec *ops;
2504        int rc;
2505
2506        ops = sfc_flow_get_ops_by_spec(flow);
2507        if (ops == NULL || ops->insert == NULL) {
2508                rte_flow_error_set(error, ENOTSUP,
2509                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2510                                   "No backend to handle this flow");
2511                return rte_errno;
2512        }
2513
2514        rc = ops->insert(sa, flow);
2515        if (rc != 0) {
2516                rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2517                                   NULL, "Failed to insert the flow rule");
2518        }
2519
2520        return rc;
2521}
2522
2523static int
2524sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2525                struct rte_flow_error *error)
2526{
2527        const struct sfc_flow_ops_by_spec *ops;
2528        int rc;
2529
2530        ops = sfc_flow_get_ops_by_spec(flow);
2531        if (ops == NULL || ops->remove == NULL) {
2532                rte_flow_error_set(error, ENOTSUP,
2533                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2534                                   "No backend to handle this flow");
2535                return rte_errno;
2536        }
2537
2538        rc = ops->remove(sa, flow);
2539        if (rc != 0) {
2540                rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2541                                   NULL, "Failed to remove the flow rule");
2542        }
2543
2544        return rc;
2545}
2546
2547static int
2548sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2549                struct rte_flow_error *error)
2550{
2551        const struct sfc_flow_ops_by_spec *ops;
2552        int rc = 0;
2553
2554        ops = sfc_flow_get_ops_by_spec(flow);
2555        if (ops == NULL) {
2556                rte_flow_error_set(error, ENOTSUP,
2557                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2558                                   "No backend to handle this flow");
2559                return -rte_errno;
2560        }
2561
2562        if (ops->verify != NULL) {
2563                SFC_ASSERT(sfc_adapter_is_locked(sa));
2564                rc = ops->verify(sa, flow);
2565        }
2566
2567        if (rc != 0) {
2568                rte_flow_error_set(error, rc,
2569                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2570                        "Failed to verify flow validity with FW");
2571                return -rte_errno;
2572        }
2573
2574        return 0;
2575}
2576
2577static int
2578sfc_flow_validate(struct rte_eth_dev *dev,
2579                  const struct rte_flow_attr *attr,
2580                  const struct rte_flow_item pattern[],
2581                  const struct rte_flow_action actions[],
2582                  struct rte_flow_error *error)
2583{
2584        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2585        struct rte_flow *flow;
2586        int rc;
2587
2588        flow = sfc_flow_zmalloc(error);
2589        if (flow == NULL)
2590                return -rte_errno;
2591
2592        sfc_adapter_lock(sa);
2593
2594        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2595        if (rc == 0)
2596                rc = sfc_flow_verify(sa, flow, error);
2597
2598        sfc_flow_free(sa, flow);
2599
2600        sfc_adapter_unlock(sa);
2601
2602        return rc;
2603}
2604
2605static struct rte_flow *
2606sfc_flow_create(struct rte_eth_dev *dev,
2607                const struct rte_flow_attr *attr,
2608                const struct rte_flow_item pattern[],
2609                const struct rte_flow_action actions[],
2610                struct rte_flow_error *error)
2611{
2612        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2613        struct rte_flow *flow = NULL;
2614        int rc;
2615
2616        flow = sfc_flow_zmalloc(error);
2617        if (flow == NULL)
2618                goto fail_no_mem;
2619
2620        sfc_adapter_lock(sa);
2621
2622        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2623        if (rc != 0)
2624                goto fail_bad_value;
2625
2626        TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2627
2628        if (sa->state == SFC_ETHDEV_STARTED) {
2629                rc = sfc_flow_insert(sa, flow, error);
2630                if (rc != 0)
2631                        goto fail_flow_insert;
2632        }
2633
2634        sfc_adapter_unlock(sa);
2635
2636        return flow;
2637
2638fail_flow_insert:
2639        TAILQ_REMOVE(&sa->flow_list, flow, entries);
2640
2641fail_bad_value:
2642        sfc_flow_free(sa, flow);
2643        sfc_adapter_unlock(sa);
2644
2645fail_no_mem:
2646        return NULL;
2647}
2648
2649static int
2650sfc_flow_destroy(struct rte_eth_dev *dev,
2651                 struct rte_flow *flow,
2652                 struct rte_flow_error *error)
2653{
2654        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2655        struct rte_flow *flow_ptr;
2656        int rc = EINVAL;
2657
2658        sfc_adapter_lock(sa);
2659
2660        TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2661                if (flow_ptr == flow)
2662                        rc = 0;
2663        }
2664        if (rc != 0) {
2665                rte_flow_error_set(error, rc,
2666                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2667                                   "Failed to find flow rule to destroy");
2668                goto fail_bad_value;
2669        }
2670
2671        if (sa->state == SFC_ETHDEV_STARTED)
2672                rc = sfc_flow_remove(sa, flow, error);
2673
2674        TAILQ_REMOVE(&sa->flow_list, flow, entries);
2675        sfc_flow_free(sa, flow);
2676
2677fail_bad_value:
2678        sfc_adapter_unlock(sa);
2679
2680        return -rc;
2681}
2682
2683static int
2684sfc_flow_flush(struct rte_eth_dev *dev,
2685               struct rte_flow_error *error)
2686{
2687        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2688        struct rte_flow *flow;
2689        int ret = 0;
2690
2691        sfc_adapter_lock(sa);
2692
2693        while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2694                if (sa->state == SFC_ETHDEV_STARTED) {
2695                        int rc;
2696
2697                        rc = sfc_flow_remove(sa, flow, error);
2698                        if (rc != 0)
2699                                ret = rc;
2700                }
2701
2702                TAILQ_REMOVE(&sa->flow_list, flow, entries);
2703                sfc_flow_free(sa, flow);
2704        }
2705
2706        sfc_adapter_unlock(sa);
2707
2708        return -ret;
2709}
2710
2711static int
2712sfc_flow_query(struct rte_eth_dev *dev,
2713               struct rte_flow *flow,
2714               const struct rte_flow_action *action,
2715               void *data,
2716               struct rte_flow_error *error)
2717{
2718        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2719        const struct sfc_flow_ops_by_spec *ops;
2720        int ret;
2721
2722        sfc_adapter_lock(sa);
2723
2724        ops = sfc_flow_get_ops_by_spec(flow);
2725        if (ops == NULL || ops->query == NULL) {
2726                ret = rte_flow_error_set(error, ENOTSUP,
2727                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2728                        "No backend to handle this flow");
2729                goto fail_no_backend;
2730        }
2731
2732        if (sa->state != SFC_ETHDEV_STARTED) {
2733                ret = rte_flow_error_set(error, EINVAL,
2734                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2735                        "Can't query the flow: the adapter is not started");
2736                goto fail_not_started;
2737        }
2738
2739        ret = ops->query(dev, flow, action, data, error);
2740        if (ret != 0)
2741                goto fail_query;
2742
2743        sfc_adapter_unlock(sa);
2744
2745        return 0;
2746
2747fail_query:
2748fail_not_started:
2749fail_no_backend:
2750        sfc_adapter_unlock(sa);
2751        return ret;
2752}
2753
2754static int
2755sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2756                 struct rte_flow_error *error)
2757{
2758        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2759        int ret = 0;
2760
2761        sfc_adapter_lock(sa);
2762        if (sa->state != SFC_ETHDEV_INITIALIZED) {
2763                rte_flow_error_set(error, EBUSY,
2764                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2765                                   NULL, "please close the port first");
2766                ret = -rte_errno;
2767        } else {
2768                sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2769        }
2770        sfc_adapter_unlock(sa);
2771
2772        return ret;
2773}
2774
2775static int
2776sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
2777                             uint16_t *transfer_proxy_port,
2778                             struct rte_flow_error *error)
2779{
2780        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2781        int ret;
2782
2783        ret = sfc_mae_get_switch_domain_admin(sa->mae.switch_domain_id,
2784                                              transfer_proxy_port);
2785        if (ret != 0) {
2786                return rte_flow_error_set(error, ret,
2787                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2788                                          NULL, NULL);
2789        }
2790
2791        return 0;
2792}
2793
2794const struct rte_flow_ops sfc_flow_ops = {
2795        .validate = sfc_flow_validate,
2796        .create = sfc_flow_create,
2797        .destroy = sfc_flow_destroy,
2798        .flush = sfc_flow_flush,
2799        .query = sfc_flow_query,
2800        .isolate = sfc_flow_isolate,
2801        .tunnel_decap_set = sfc_ft_decap_set,
2802        .tunnel_match = sfc_ft_match,
2803        .tunnel_action_decap_release = sfc_ft_action_decap_release,
2804        .tunnel_item_release = sfc_ft_item_release,
2805        .get_restore_info = sfc_ft_get_restore_info,
2806        .pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
2807};
2808
2809void
2810sfc_flow_init(struct sfc_adapter *sa)
2811{
2812        SFC_ASSERT(sfc_adapter_is_locked(sa));
2813
2814        TAILQ_INIT(&sa->flow_list);
2815}
2816
2817void
2818sfc_flow_fini(struct sfc_adapter *sa)
2819{
2820        struct rte_flow *flow;
2821
2822        SFC_ASSERT(sfc_adapter_is_locked(sa));
2823
2824        while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2825                TAILQ_REMOVE(&sa->flow_list, flow, entries);
2826                sfc_flow_free(sa, flow);
2827        }
2828}
2829
2830void
2831sfc_flow_stop(struct sfc_adapter *sa)
2832{
2833        struct rte_flow *flow;
2834
2835        SFC_ASSERT(sfc_adapter_is_locked(sa));
2836
2837        TAILQ_FOREACH(flow, &sa->flow_list, entries)
2838                sfc_flow_remove(sa, flow, NULL);
2839
2840        /*
2841         * MAE counter service is not stopped on flow rule remove to avoid
2842         * extra work. Make sure that it is stopped here.
2843         */
2844        sfc_mae_counter_stop(sa);
2845}
2846
2847int
2848sfc_flow_start(struct sfc_adapter *sa)
2849{
2850        struct rte_flow *flow;
2851        int rc = 0;
2852
2853        sfc_log_init(sa, "entry");
2854
2855        SFC_ASSERT(sfc_adapter_is_locked(sa));
2856
2857        sfc_ft_counters_reset(sa);
2858
2859        TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2860                rc = sfc_flow_insert(sa, flow, NULL);
2861                if (rc != 0)
2862                        goto fail_bad_flow;
2863        }
2864
2865        sfc_log_init(sa, "done");
2866
2867fail_bad_flow:
2868        return rc;
2869}
2870
2871static void
2872sfc_flow_cleanup(struct sfc_adapter *sa, struct rte_flow *flow)
2873{
2874        if (flow == NULL)
2875                return;
2876
2877        sfc_flow_rss_ctx_del(sa, flow->spec.filter.rss_ctx);
2878}
2879