dpdk/drivers/net/enic/enic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled-in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 * l2_proto_off: Offset to the EtherType field in the eth or vlan header.
 * l3_proto_off: Offset to the next-protocol field in the IPv4 or IPv6 header.
 */
struct copy_item_args {
        const struct rte_flow_item *item;
        struct filter_v2 *filter;
        uint8_t *inner_ofst;
        uint8_t l2_proto_off;
        uint8_t l3_proto_off;
        struct enic *enic;
};

/* Functions for copying items into enic filters. */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        enic_copy_item_fn *copy_item;
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const uint8_t valid_start_item;
        /* Inner packet version of copy_item. */
        enic_copy_item_fn *inner_copy_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** List of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
        /* Max type in the above list, used to detect unsupported types. */
        enum rte_flow_item_type max_item_type;
};

/* Functions for copying flow actions into enic actions. */
typedef int (copy_action_fn)(struct enic *enic,
                             const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** List of valid actions. */
        const enum rte_flow_action_type *actions;
        /** Copy function for a particular NIC. */
        copy_action_fn *copy_fn;
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_raw_v2;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static enic_copy_item_fn enic_copy_item_inner_eth_v2;
static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
static enic_copy_item_fn enic_copy_item_inner_udp_v2;
static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};
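
/*
 * Illustrative example (editor's sketch, not part of the driver): with the
 * v1 table above, a pattern must start at IPv4 and may continue to UDP or
 * TCP. A testpmd-style flow such as the following would be accepted:
 *
 *   flow create 0 ingress pattern ipv4 src is 10.0.0.1 dst is 10.0.0.2 /
 *        udp src is 100 dst is 200 / end actions queue index 1 / end
 *
 * Both addresses and both ports must be fully specified, since only
 * 5-tuple perfect match is supported.
 */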

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * In this mode, the pattern must include layer 3.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_RAW] = {
                .copy_item = enic_copy_item_raw_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_eth_v2,
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_vlan_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv4_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv6_v2,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_udp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_tcp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};

/** NICs with Advanced Filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_RAW] = {
                .copy_item = enic_copy_item_raw_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_eth_v2,
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_vlan_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv4_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv6_v2,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_udp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_tcp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};
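
/*
 * Illustrative example (editor's sketch, not part of the driver): unlike
 * the v2 table, the v3 table sets valid_start_item on UDP, TCP, and VXLAN,
 * so with Advanced Filters enabled a pattern may begin at layer 4, e.g.:
 *
 *   flow create 0 ingress pattern udp dst is 4789 / vxlan / end
 *        actions queue index 1 / end
 */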

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
                .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
};

static int
mask_exact_match(const uint8_t *supported, const uint8_t *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}

static int
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct rte_ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        ENICPMD_FUNC_TRACE();

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                ENICPMD_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* Check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const uint8_t *)&supported_mask,
                              (const uint8_t *)mask, sizeof(*mask))) {
                ENICPMD_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

static int
enic_copy_item_udp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct rte_udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        ENICPMD_FUNC_TRACE();

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                ENICPMD_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* Check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const uint8_t *)&supported_mask,
                              (const uint8_t *)mask, sizeof(*mask))) {
                ENICPMD_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

static int
enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct rte_tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        ENICPMD_FUNC_TRACE();

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                ENICPMD_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* Check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const uint8_t *)&supported_mask,
                              (const uint8_t *)mask, sizeof(*mask))) {
                ENICPMD_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

/*
 * The common 'copy' function for all inner packet patterns. Patterns are
 * first appended to the L5 pattern buffer. Then, since the NIC filter
 * API has no special support for inner packet matching at the moment,
 * we set EtherType and IP proto as necessary.
 */
static int
copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
                  const void *val, const void *mask, uint8_t val_size,
                  uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
{
        uint8_t *l5_mask, *l5_val;
        uint8_t start_off;

        /* Fail if there is no space left in the L5 pattern buffer. */
        start_off = *inner_ofst;
        if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
                return ENOTSUP;
        l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
        l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
        /* Copy the pattern into the L5 buffer. */
        if (val) {
                memcpy(l5_mask + start_off, mask, val_size);
                memcpy(l5_val + start_off, val, val_size);
        }
        /* Set the protocol field in the previous header. */
        if (proto_off) {
                void *m, *v;

                m = l5_mask + proto_off;
                v = l5_val + proto_off;
                if (proto_size == 1) {
                        *(uint8_t *)m = 0xff;
                        *(uint8_t *)v = (uint8_t)proto_val;
                } else if (proto_size == 2) {
                        *(uint16_t *)m = 0xffff;
                        *(uint16_t *)v = proto_val;
                }
        }
        /* All inner headers land in L5 buffer even if their spec is null. */
        *inner_ofst += val_size;
        return 0;
}
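
/*
 * Worked example (editor's sketch, not part of the driver): for an inner
 * eth/ipv4/udp pattern, the copy functions below call copy_inner_common()
 * three times. Assuming *inner_ofst starts at the size of the vxlan header,
 * the L5 buffer accumulates as follows:
 *
 *   inner eth  -> copied at start_off; l2_proto_off remembers its EtherType
 *   inner ipv4 -> copied next; EtherType at l2_proto_off forced to 0x0800
 *   inner udp  -> copied next; next_proto_id at l3_proto_off forced to 17
 *
 * Each call advances *inner_ofst by the size of the header it appended.
 */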

static int
enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_eth_mask;
        arg->l2_proto_off = *off + offsetof(struct rte_ether_hdr, ether_type);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_ether_hdr),
                0 /* no previous protocol */, 0, 0);
}

static int
enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;
        uint8_t eth_type_off;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
        /* Append vlan header to L5 and set ether type = TPID */
        eth_type_off = arg->l2_proto_off;
        arg->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_vlan_hdr),
                eth_type_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN), 2);
}

static int
enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
        /* Append ipv4 header to L5 and set ether type = ipv4 */
        arg->l3_proto_off = *off + offsetof(struct rte_ipv4_hdr, next_proto_id);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_ipv4_hdr),
                arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4), 2);
}

static int
enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
        /* Append ipv6 header to L5 and set ether type = ipv6 */
        arg->l3_proto_off = *off + offsetof(struct rte_ipv6_hdr, proto);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_ipv6_hdr),
                arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6), 2);
}

static int
enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_udp_mask;
        /* Append udp header to L5 and set ip proto = udp */
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_udp_hdr),
                arg->l3_proto_off, IPPROTO_UDP, 1);
}

static int
enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_tcp_mask;
        /* Append tcp header to L5 and set ip proto = tcp */
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_tcp_hdr),
                arg->l3_proto_off, IPPROTO_TCP, 1);
}

static int
enic_copy_item_eth_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        struct rte_ether_hdr enic_spec;
        struct rte_ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        /* Outer header */
        memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
               sizeof(struct rte_ether_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
               sizeof(struct rte_ether_hdr));
        return 0;
}

static int
enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        struct rte_ether_hdr *eth_mask;
        struct rte_ether_hdr *eth_val;

        ENICPMD_FUNC_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
        eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
        /* Outer TPID cannot be matched */
        if (eth_mask->ether_type)
                return ENOTSUP;
        /*
         * For recent models:
         * When matching packets, the VIC always compares the vlan-stripped
         * L2 header, regardless of vlan stripping settings. So the inner
         * type from the vlan item becomes the ether type of the eth header.
         *
         * Older models without the hardware vxlan parser behave differently
         * when vlan stripping is disabled. In that case, the vlan tag
         * remains in the L2 buffer.
         */
        if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) {
                struct rte_vlan_hdr *vlan;

                vlan = (struct rte_vlan_hdr *)(eth_mask + 1);
                vlan->eth_proto = mask->inner_type;
                vlan = (struct rte_vlan_hdr *)(eth_val + 1);
                vlan->eth_proto = spec->inner_type;
        } else {
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;
        }
        /* For TCI, use the vlan mask/val fields (little endian). */
        gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
        gp->val_vlan = rte_be_to_cpu_16(spec->tci);
        return 0;
}

static int
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Match IPv4 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV4;
        gp->val_flags |= FILTER_GENERIC_1_IPV4;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
               sizeof(struct rte_ipv4_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
               sizeof(struct rte_ipv4_hdr));
        return 0;
}

static int
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
               sizeof(struct rte_ipv6_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
               sizeof(struct rte_ipv6_hdr));
        return 0;
}

static int
enic_copy_item_udp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct rte_udp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct rte_udp_hdr));
        return 0;
}

static int
enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

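        /* No default mask is substituted here: unlike the other copy
         * functions, TCP requires an explicit mask.
         */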
        if (!mask)
                return ENOTSUP;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct rte_tcp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct rte_tcp_hdr));
        return 0;
}

static int
enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        uint8_t *ip_proto_mask = NULL;
        uint8_t *ip_proto = NULL;

        ENICPMD_FUNC_TRACE();

        /*
         * The NIC filter API has no flags for "match sctp", so explicitly set
         * the protocol number in the IP pattern.
         */
        if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
                struct rte_ipv4_hdr *ip;
                ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->next_proto_id;
                ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->next_proto_id;
        } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
                struct rte_ipv6_hdr *ip;
                ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->proto;
                ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->proto;
        } else {
                /* Need IPv4/IPv6 pattern first */
                return EINVAL;
        }
        *ip_proto = IPPROTO_SCTP;
        *ip_proto_mask = 0xff;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct rte_sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct rte_sctp_hdr));
        return 0;
}

static int
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        struct rte_udp_hdr *udp;

        ENICPMD_FUNC_TRACE();

        /*
         * The NIC filter API has no flags for "match vxlan". Set UDP port to
         * avoid false positives.
         */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;
        udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
        udp->dst_port = 0xffff;
        udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
        udp->dst_port = RTE_BE16(4789);
        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct rte_vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct rte_vxlan_hdr));

        *inner_ofst = sizeof(struct rte_vxlan_hdr);
        return 0;
}
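
/*
 * After the vxlan item is copied: L4 matches UDP with dst port 4789, L5
 * begins with the vxlan header, and *inner_ofst points just past it so
 * that any inner items are appended behind the vxlan header in L5.
 */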

/*
 * Copy a raw item into a version 2 NIC filter. Currently, raw pattern match
 * is very limited: it is intended only for matching a UDP tunnel header
 * (e.g. vxlan or geneve).
 */
static int
enic_copy_item_raw_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_raw *spec = item->spec;
        const struct rte_flow_item_raw *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Cannot be used for inner packet */
        if (*inner_ofst)
                return EINVAL;
        /* Need both spec and mask */
        if (!spec || !mask)
                return EINVAL;
        /* Only supports relative with offset 0 */
        if (!spec->relative || spec->offset != 0 || spec->search || spec->limit)
                return EINVAL;
        /* Need non-null pattern that fits within the NIC's filter pattern */
        if (spec->length == 0 ||
            spec->length + sizeof(struct rte_udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
            !spec->pattern || !mask->pattern)
                return EINVAL;
        /*
         * Mask fields, including length, are often set to zero. Assume that
         * means "same as spec" to avoid breaking existing apps. If length
         * is not zero, then it should be >= spec length.
         *
         * No more pattern follows this, so append to the L4 layer instead of
         * L5 to work with both recent and older VICs.
         */
        if (mask->length != 0 && mask->length < spec->length)
                return EINVAL;
        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
               mask->pattern, spec->length);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
               spec->pattern, spec->length);

        return 0;
}
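
/*
 * Illustrative example (editor's sketch, not part of the driver): the raw
 * item can extend a UDP match with tunnel-header bytes, e.g. in testpmd:
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp dst is 6081 /
 *        raw relative is 1 offset is 0 pattern is <geneve header bytes> /
 *        end actions queue index 1 / end
 *
 * The pattern bytes land in L4 right behind the UDP header.
 */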

/**
 * Return 1 if the current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, uint8_t is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        ENICPMD_FUNC_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check whether it may start one. */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}
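
/*
 * Example (derived from the tables above): with enic_items_v3, the pattern
 * eth / vlan / ipv4 / udp stacks validly, while vlan / eth or udp / ipv4
 * fail here because the earlier item is not in the later item's prev_items
 * list.
 */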

/*
 * Fix up the L5 layer: HW vxlan parsing removes the vxlan header from L5.
 * Instead, it is in L4, following the UDP header. Append the vxlan
 * pattern to L4 (udp) and shift any inner packet pattern in L5.
 */
static void
fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
               uint8_t inner_ofst)
{
        uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
        uint8_t inner;
        uint8_t vxlan;

        if (!(inner_ofst > 0 && enic->vxlan))
                return;
        ENICPMD_FUNC_TRACE();
        vxlan = sizeof(struct rte_vxlan_hdr);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
               gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
               gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
        inner = inner_ofst - vxlan;
        memset(layer, 0, sizeof(layer));
        memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
        memset(layer, 0, sizeof(layer));
        memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
}
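
/*
 * Before/after sketch of the fixup above, for a vxlan + inner eth pattern:
 *
 *   before: L4 = [udp hdr],             L5 = [vxlan hdr][inner eth]
 *   after:  L4 = [udp hdr][vxlan hdr],  L5 = [inner eth]
 *
 * It only applies when the VIC has the hardware vxlan parser (enic->vxlan).
 */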

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC-specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_filter_cap *cap,
                 struct enic *enic,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        uint8_t inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;
        struct copy_item_args args;
        enic_copy_item_fn *copy_fn;
        uint8_t is_first_item = 1;

        ENICPMD_FUNC_TRACE();

        prev_item = 0;

        args.filter = enic_filter;
        args.inner_ofst = &inner_ofst;
        args.enic = enic;
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If the
                 * copy function is NULL, the NIC does not support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &cap->item_info[item->type];
                if (item->type > cap->max_item_type ||
                    item_info->copy_item == NULL ||
                    (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Unsupported item.");
                        return -rte_errno;
                }

                /* Check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                args.item = item;
                copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
                        item_info->copy_item;
                ret = copy_fn(&args);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);

        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, };
        uint32_t overlap = 0;

        ENICPMD_FUNC_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v2(struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;
        bool passthru = false;

        ENICPMD_FUNC_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;
                        if (enic->use_noscatter_vec_rx_handler)
                                return ENOTSUP;
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /*
                         * Map mark ID (32-bit) to filter ID (16-bit):
                         * - Reject values > 16 bits
                         * - Filter ID 0 is reserved for filters that steer
                         *   but not mark. So add 1 to the mark ID to avoid
                         *   using 0.
                         * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
                         *   reserved for the "flag" action below.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return EINVAL;
                        enic_action->filter_id = mark->id + 1;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (enic->use_noscatter_vec_rx_handler)
                                return ENOTSUP;
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_RSS: {
                        const struct rte_flow_action_rss *rss =
                                (const struct rte_flow_action_rss *)
                                actions->conf;
                        bool allow;
                        uint16_t i;

                        /*
                         * Hardware does not support general RSS actions, but
                         * we can still support the dummy one that is used to
                         * "receive normally".
                         */
                        allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
                                rss->level == 0 &&
                                (rss->types == 0 ||
                                 rss->types == enic->rss_hf) &&
                                rss->queue_num == enic->rq_count &&
                                rss->key_len == 0;
                        /* Identity queue map is ok */
                        for (i = 0; i < rss->queue_num; i++)
                                allow = allow && (i == rss->queue[i]);
                        if (!allow)
                                return ENOTSUP;
                        if (overlap & FATE)
                                return ENOTSUP;
                        /* Need MARK or FLAG */
                        if (!(overlap & MARK))
                                return ENOTSUP;
                        overlap |= FATE;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
                        /*
                         * Like RSS above, PASSTHRU + MARK may be used to
                         * "mark and then receive normally". MARK usually comes
                         * after PASSTHRU, so remember we have seen passthru
                         * and check for mark later.
                         */
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        passthru = true;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        /* Only PASSTHRU + MARK is allowed */
        if (passthru && !(overlap & MARK))
                return ENOTSUP;
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}
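
/*
 * Example of the mark-to-filter-id mapping above: mark id 5 becomes
 * filter_id 6, mark id 0 becomes filter_id 1, and the flag action uses the
 * reserved ENIC_MAGIC_FILTER_ID. Mark ids >= ENIC_MAGIC_FILTER_ID - 1 are
 * rejected so the mapping cannot collide with the reserved value.
 */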

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
1323static const struct enic_filter_cap *
1324enic_get_filter_cap(struct enic *enic)
1325{
1326        if (enic->flow_filter_mode)
1327                return &enic_filter_cap[enic->flow_filter_mode];
1328
1329        return NULL;
1330}
1331
1332/** Get the actions for this NIC version. */
1333static const struct enic_action_cap *
1334enic_get_action_cap(struct enic *enic)
1335{
1336        const struct enic_action_cap *ea;
1337        uint8_t actions;
1338
1339        actions = enic->filter_actions;
1340        if (actions & FILTER_ACTION_DROP_FLAG)
1341                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
1342        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
1343                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
1344        else
1345                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
1346        return ea;
1347}
1348
1349/* Debug function to dump internal NIC action structure. */
1350static void
1351enic_dump_actions(const struct filter_action_v2 *ea)
1352{
1353        if (ea->type == FILTER_ACTION_RQ_STEERING) {
1354                ENICPMD_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
1355        } else if (ea->type == FILTER_ACTION_V2) {
1356                ENICPMD_LOG(INFO, "Actions(V2)\n");
1357                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
1358                        ENICPMD_LOG(INFO, "\tqueue: %u\n",
1359                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
1360                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
1361                        ENICPMD_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
1362        }
1363}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
	/* Flag/name pairs used to dump the generic filter flags below;
	 * this replaces the former copy-pasted per-flag sprintf blocks.
	 */
	static const struct {
		uint32_t flag;
		const char *name;
	} flag_tbl[] = {
		{ FILTER_GENERIC_1_IPV4, "ip4" },
		{ FILTER_GENERIC_1_IPV6, "ip6" },
		{ FILTER_GENERIC_1_UDP, "udp" },
		{ FILTER_GENERIC_1_TCP, "tcp" },
		{ FILTER_GENERIC_1_TCP_OR_UDP, "tcpudp" },
		{ FILTER_GENERIC_1_IP4SUM_OK, "ip4csum" },
		{ FILTER_GENERIC_1_L4SUM_OK, "l4csum" },
		{ FILTER_GENERIC_1_IPFRAG, "ipfrag" },
	};
	const struct filter_generic_1 *gp;
	unsigned int i;
	int j, mbyte;
	/* Large enough for a full key dumped as hex plus NUL */
	char buf[2 * FILTER_GENERIC_1_KEY_LEN + 1], *bp;

	switch (filt->type) {
	case FILTER_IPV4_5TUPLE:
		ENICPMD_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
		break;
	case FILTER_USNIC_IP:
	case FILTER_DPDK_1:
		gp = &filt->u.generic_1;
		ENICPMD_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
		       gp->val_vlan, gp->mask_vlan);

		/* Each flag dumps as name(y), name(n) or name(x) for
		 * must-match, must-not-match, or not checked (absent
		 * from the mask).
		 */
		bp = buf;
		for (i = 0; i < RTE_DIM(flag_tbl); i++) {
			if (!(gp->mask_flags & flag_tbl[i].flag))
				bp += sprintf(bp, "%s(x) ", flag_tbl[i].name);
			else
				bp += sprintf(bp, "%s(%c) ", flag_tbl[i].name,
					      (gp->val_flags & flag_tbl[i].flag)
					      ? 'y' : 'n');
		}
		ENICPMD_LOG(INFO, "\tFlags: %s\n", buf);

		for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
			mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
			while (mbyte && !gp->layer[i].mask[mbyte])
				mbyte--;
			if (mbyte == 0)
				continue;

			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].mask[j]);
				bp += 2;
			}
			*bp = '\0';
			ENICPMD_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].val[j]);
				bp += 2;
			}
			*bp = '\0';
			ENICPMD_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
		}
		break;
	default:
		ENICPMD_LOG(INFO, "FILTER UNKNOWN\n");
		break;
	}
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
	enic_dump_filter(filt);
	enic_dump_actions(ea);
}

/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param attrs[in]
 *   Flow attributes; only ingress without group/priority is supported.
 * @param pattern[in]
 *   Item specifications to match, ending with RTE_FLOW_ITEM_TYPE_END.
 * @param actions[in]
 *   Actions to perform, ending with RTE_FLOW_ACTION_TYPE_END.
 * @param error[out]
 *   Filled in on failure.
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attrs,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct filter_v2 *enic_filter,
		struct filter_action_v2 *enic_action)
{
	int ret = 0;
	struct enic *enic = pmd_priv(dev);
	const struct enic_filter_cap *enic_filter_cap;
	const struct enic_action_cap *enic_action_cap;
	const struct rte_flow_action *action;

	ENICPMD_FUNC_TRACE();

	memset(enic_filter, 0, sizeof(*enic_filter));
	memset(enic_action, 0, sizeof(*enic_action));

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No pattern specified");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "No action specified");
		return -rte_errno;
	}

	if (attrs) {
		if (attrs->group) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					   NULL,
					   "priority groups are not supported");
			return -rte_errno;
		} else if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   NULL,
					   "priorities are not supported");
			return -rte_errno;
		} else if (attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					   NULL,
					   "egress is not supported");
			return -rte_errno;
		} else if (attrs->transfer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   NULL,
					   "transfer is not supported");
			return -rte_errno;
		} else if (!attrs->ingress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   NULL,
					   "only ingress is supported");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "No attribute specified");
		return -rte_errno;
	}

	/* Verify actions. */
	enic_action_cap = enic_get_action_cap(enic);
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
	     action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		else if (!enic_match_action(action, enic_action_cap->actions))
			break;
	}
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "Invalid action.");
		return -rte_errno;
	}
	ret = enic_action_cap->copy_fn(enic, actions, enic_action);
	if (ret) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
			   NULL, "Unsupported action.");
		return -rte_errno;
	}

	/* Verify flow items. If copying the filter from flow format to enic
	 * format fails, the flow is not supported.
	 */
	enic_filter_cap = enic_get_filter_cap(enic);
	if (enic_filter_cap == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
			   NULL, "Flow API not available");
		return -rte_errno;
	}
	enic_filter->type = enic->flow_filter_mode;
	if (enic->adv_filters)
		enic_filter->type = FILTER_DPDK_1;
	ret = enic_copy_filter(pattern, enic_filter_cap, enic,
				       enic_filter, error);
	return ret;
}
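
/*
 * Illustrative input this parser accepts (a sketch, not driver code):
 * ingress-only attributes and an END-terminated item list.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * Any group, priority, egress, or transfer attribute is rejected
 * above, so only .ingress may be set.
 */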

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
		   struct filter_action_v2 *enic_action,
		   struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int err;
	uint16_t entry;

	ENICPMD_FUNC_TRACE();

	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}

	/* entry[in] is the queue id, entry[out] is the filter id for delete */
	entry = enic_action->rq_idx;
	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
				  enic_action);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
		rte_free(flow);
		return NULL;
	}

	flow->enic_filter_id = entry;
	flow->enic_filter = *enic_filter;
	return flow;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param flow[in]
 *   Flow whose NIC filter (flow->enic_filter_id) is removed.
 * @param error[out]
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
		   struct rte_flow_error *error)
{
	uint16_t filter_id;
	int err;

	ENICPMD_FUNC_TRACE();

	filter_id = flow->enic_filter_id;
	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier failed");
		return -err;
	}
	return 0;
}

/*
 * The following functions are callbacks for the generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	int ret;

	ENICPMD_FUNC_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			       &enic_filter, &enic_action);
	if (!ret)
		enic_dump_flow(&enic_action, &enic_filter);
	return ret;
}
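
/*
 * Application-level sketch (port_id, attr, pattern, and actions are
 * hypothetical) reaching this callback through the public API:
 *
 *	struct rte_flow_error err;
 *	int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *	if (rc != 0)
 *		printf("flow not supported: %s\n",
 *		       err.message ? err.message : "(no message)");
 */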

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attrs,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
			      &enic_action);
	if (ret < 0)
		return NULL;

	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
				    error);
	if (flow)
		LIST_INSERT_HEAD(&enic->flows, flow, next);

	return flow;
}
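
/*
 * A minimal end-to-end sketch (illustrative; port_id and the queue
 * index are assumptions): steer packets matching the IPv4 pattern
 * shown earlier to RX queue 0 with the QUEUE action.
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f =
 *		rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("create failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */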

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	enic_flow_del_filter(enic, flow, error);
	LIST_REMOVE(flow, next);
	rte_free(flow);
	return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		enic_flow_del_filter(enic, flow, error);
		LIST_REMOVE(flow, next);
		rte_free(flow);
	}
	return 0;
}
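
/*
 * From the application side this is reached via rte_flow_flush()
 * (a sketch; port_id is assumed valid), typically at teardown:
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		printf("flush failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */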

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
	.validate = enic_flow_validate,
	.create = enic_flow_create,
	.destroy = enic_flow_destroy,
	.flush = enic_flow_flush,
};
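
/*
 * How this table is wired up (a simplified sketch, assuming this tree
 * exposes flows through the ethdev flow_ops_get hook; the real
 * callback lives in enic_ethdev.c and may also select other filter
 * implementations): the rte_flow layer fetches the ops table and
 * dispatches rte_flow_validate()/create()/destroy()/flush() to the
 * functions above.
 *
 *	static int
 *	example_flow_ops_get(struct rte_eth_dev *dev,
 *			     const struct rte_flow_ops **ops)
 *	{
 *		*ops = &enic_flow_ops;
 *		return 0;
 *	}
 */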