dpdk/drivers/net/ice/ice_fdir_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH (\
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_FDIR_INSET_ETH | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR | \
        ICE_INSET_IPV6_PKID)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_IPV4 (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_IPV4_PKID)

#define ICE_FDIR_INSET_IPV4_TCP (\
        ICE_FDIR_INSET_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_UDP (\
        ICE_FDIR_INSET_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_SCTP (\
        ICE_FDIR_INSET_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
        ICE_FDIR_INSET_ETH | ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_VXLAN_VNI)

#define ICE_FDIR_INSET_IPV4_GTPU (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV4_GTPU_EH (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV6_GTPU (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV6_GTPU_EH (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV4_ESP (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_ESP_SPI)

#define ICE_FDIR_INSET_IPV6_ESP (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
        ICE_INSET_ESP_SPI)

#define ICE_FDIR_INSET_IPV4_NATT_ESP (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_NAT_T_ESP_SPI)

#define ICE_FDIR_INSET_IPV6_NATT_ESP (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
        ICE_INSET_NAT_T_ESP_SPI)

static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
        {pattern_ethertype,                             ICE_FDIR_INSET_ETH,             ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4,                              ICE_FDIR_INSET_ETH_IPV4,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,                          ICE_FDIR_INSET_ETH_IPV4_UDP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,                          ICE_FDIR_INSET_ETH_IPV4_TCP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,                         ICE_FDIR_INSET_ETH_IPV4_SCTP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6,                              ICE_FDIR_INSET_ETH_IPV6,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_frag_ext,                     ICE_FDIR_INSET_ETH_IPV6,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,                          ICE_FDIR_INSET_ETH_IPV6_UDP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,                          ICE_FDIR_INSET_ETH_IPV6_TCP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,                         ICE_FDIR_INSET_ETH_IPV6_SCTP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_esp,                          ICE_FDIR_INSET_IPV4_ESP,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_esp,                      ICE_FDIR_INSET_IPV4_NATT_ESP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_esp,                          ICE_FDIR_INSET_IPV6_ESP,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_esp,                      ICE_FDIR_INSET_IPV6_NATT_ESP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,               ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,           ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,           ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,          ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4_UDP,    ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4_TCP,    ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,      ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4_SCTP,   ICE_INSET_NONE},
        /* The GTPU input set is duplicated in the 3rd column to align with shared code behavior; ideally the GTPU fields would appear only in the 2nd column. */
        {pattern_eth_ipv4_gtpu,                         ICE_FDIR_INSET_IPV4_GTPU,       ICE_FDIR_INSET_IPV4_GTPU,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh,                      ICE_FDIR_INSET_IPV4_GTPU_EH,    ICE_FDIR_INSET_IPV4_GTPU_EH,    ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu,                         ICE_FDIR_INSET_IPV6_GTPU,       ICE_FDIR_INSET_IPV6_GTPU,       ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_eh,                      ICE_FDIR_INSET_IPV6_GTPU_EH,    ICE_FDIR_INSET_IPV6_GTPU_EH,    ICE_INSET_NONE},
};
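
/*
 * Illustrative sketch (hypothetical application code, not part of this
 * driver): a rule matching pattern_eth_ipv4_udp above can be requested
 * through the generic rte_flow API roughly as follows; the port id,
 * queue index and spec/mask values are assumptions for the example.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 4 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error flow_err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &flow_err);
 */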

static struct ice_flow_parser ice_fdir_parser;

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}
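
/*
 * Usage sketch (assumed caller, mirroring ice_fdir_setup() below): the
 * lookup-before-reserve makes the helper idempotent, so a second setup
 * after teardown reuses the existing IOVA-contiguous zone instead of
 * failing with -EEXIST. The zone name here is hypothetical.
 *
 *	const struct rte_memzone *mz =
 *		ice_memzone_reserve("ICE_FDIR_MEMZONE_0",
 *				    ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
 *	if (mz != NULL)
 *		memset(mz->addr, 0, ICE_FDIR_PKT_LEN);
 */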

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;

        return -ENOMEM;
}

static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        /* the pool was linked into the container list above; unlink it
         * before freeing so no dangling entry is left behind.
         */
        TAILQ_REMOVE(&container->pool_list, pool, next);
        rte_free(pool);
        return ret;
}
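
/*
 * Sizing note, with a worked example: the pool header and its counter
 * array come from a single rte_zmalloc(), so for
 * len == ICE_FDIR_COUNTERS_PER_BLOCK the request is
 * sizeof(struct ice_fdir_counter_pool) +
 * len * sizeof(struct ice_fdir_counter) bytes, and pool->counters[i]
 * indexes the trailing array directly; no per-counter allocation or
 * free is ever needed.
 */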

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++) {
                rte_free(container->pools[i]);
                container->pools[i] = NULL;
        }

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}
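
/*
 * Illustrative sketch (hypothetical caller): with a shared COUNT
 * action, two rules using the same counter id land on one hardware
 * counter and the second call only takes a reference, assuming a free
 * counter was available for the first call.
 *
 *	struct ice_fdir_counter *a = ice_fdir_counter_alloc(pf, 1, 7);
 *	struct ice_fdir_counter *b = ice_fdir_counter_alloc(pf, 1, 7);
 *	now a == b and a->ref_cnt == 2
 */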

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}
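
/*
 * The hash table and hash_map array work as a pair: rte_hash_add_key()
 * returns a stable slot index that ice_fdir_entry_insert() below stores
 * into hash_map[], and a later rte_hash_lookup() on the same key yields
 * the same index. Minimal sketch with a hypothetical key:
 *
 *	struct ice_fdir_fltr_pattern key;
 *	memset(&key, 0, sizeof(key));
 *	key.flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
 *	int slot = rte_hash_lookup(fdir_info->hash_table, &key);
 *	struct ice_fdir_filter_conf *e =
 *		(slot >= 0) ? fdir_info->hash_map[slot] : NULL;
 */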

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);

        fdir_info->hash_map = NULL;
        fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* Enable FDIR MSIX interrupt */
        vsi->nb_used_qps = 1;
        ice_vsi_queues_bind_intr(vsi);
        ice_vsi_enable_queues_intr(vsi);

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR set up successfully, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                rte_free(hw->fdir_prof[ptype]);
                hw->fdir_prof[ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw, ICE_BLK_FD,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}
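
/*
 * prof_id encoding used above, with a worked example: non-tunnel
 * profiles reuse the ptype value itself and tunnel profiles add
 * ICE_FLTR_PTYPE_MAX, so each (ptype, is_tunnel) pair maps to a unique
 * hardware profile id and the two namespaces never collide:
 *
 *	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
 */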

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        ice_vsi_disable_queues_intr(vsi);

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
                           enum ice_fltr_ptype ptype,
                           struct ice_flow_seg_info *seg,
                           bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!ori_seg)
                return 0;

        /* the input set matches the existing profile, so reuse it: -EEXIST */
        if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
            (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
                PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
                            ptype);
                return -EEXIST;
        }

        /* a rule with a conflicting input set already exists, so give up */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
                PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
                            ptype);
                return -EINVAL;
        }

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);
        return 0;
}

static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
                               enum ice_fltr_ptype ptype,
                               bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_seg_info *seg;

        hw_prof = hw->fdir_prof[ptype];
        seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!seg)
                return true;

        /* profile exists and rule exists, fail to resolve the conflict */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
                return false;

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);

        return true;
}

static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
                             enum ice_fltr_ptype ptype,
                             bool is_tunnel)
{
        enum ice_fltr_ptype cflct_ptype;

        switch (ptype) {
        /* IPv4 */
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv4 GTPU */
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv6 */
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        default:
                break;
        }
        return 0;
err:
        PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
                    ptype, cflct_ptype);
        return -EINVAL;
}
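
/*
 * Example of the rule encoded above: an IPv4 "other" profile matches
 * any IPv4 protocol, so it overlaps the narrower UDP/TCP/SCTP
 * profiles (and vice versa). Creation therefore only proceeds if each
 * overlapping profile is empty and can be removed first; otherwise an
 * existing rule would silently change behavior.
 */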

static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        /* check for an input set conflict within the current profile. */
        ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
        if (ret)
                return ret;

        /* check whether this profile conflicts with other profiles. */
        ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
        if (ret)
                return ret;

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        hw_prof = hw->fdir_prof[ptype];
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}
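
/*
 * Each profile configured above carries two flow entries: entry_1 on
 * the main VSI, which steers matched traffic, and entry_2 on the FDIR
 * control VSI, which receives the programming status descriptors.
 * That is why hw_prof->cnt ends at 2 and an entry handle is recorded
 * per (VSI, tunnel level) pair.
 */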

static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV4_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_IPV6_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
                {ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
                {ICE_INSET_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI},
                {ICE_INSET_NAT_T_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}
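
/*
 * Worked example: for
 * inset == (ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST |
 *           ICE_INSET_UDP_DST_PORT)
 * the loop emits ICE_FLOW_FIELD_IDX_IPV4_SA, ICE_FLOW_FIELD_IDX_IPV4_DA
 * and ICE_FLOW_FIELD_IDX_UDP_DST_PORT, in map order. Callers pre-fill
 * field[] with ICE_FLOW_FIELD_IDX_MAX and stop at the first sentinel
 * still in place.
 */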

static void
ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
{
        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_FRAG_IPV4:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_FRAG);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_FRAG_IPV6:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_FRAG);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP:
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_GTPU:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NON_IP_L2:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }
}

static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t inner_input_set, uint64_t outer_input_set,
                        enum ice_fdir_tunnel_type ttype)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        uint64_t input_set;
        bool is_tunnel;
        int k, i, ret = 0;

        if (!(inner_input_set | outer_input_set))
                return -EINVAL;

        seg_tun = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX);
        if (!seg_tun) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for flow segments");
                return -ENOMEM;
        }

        /* use seg_tun[1] to record tunnel inner part */
        for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) {
                seg = &seg_tun[k];
                input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set;
                if (input_set == 0)
                        continue;

                for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                        field[i] = ICE_FLOW_FIELD_IDX_MAX;

                ice_fdir_input_set_parse(input_set, field);

                ice_fdir_input_set_hdrs(flow, seg);

                for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                        ice_flow_set_fld(seg, field[i],
                                         ICE_FLOW_FLD_OFF_INVAL,
                                         ICE_FLOW_FLD_OFF_INVAL,
                                         ICE_FLOW_FLD_OFF_INVAL, false);
                }
        }

        is_tunnel = ice_fdir_is_tunnel_profile(ttype);

        ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                   seg_tun, flow, is_tunnel);

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                rte_free(seg_tun);
                return (ret == -EEXIST) ? 0 : ret;
        } else {
                return ret;
        }
}
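
/*
 * Layout note for the function above: seg_tun[] holds ICE_FD_HW_SEG_MAX
 * segments, with seg_tun[0] describing the outer headers and
 * seg_tun[ICE_FD_HW_SEG_TUN] the inner (tunnel) headers. On success
 * ice_fdir_hw_tbl_conf() keeps the array in hw_prof->fdir_seg[], so it
 * is freed here only on the error paths and otherwise released later
 * by ice_fdir_prof_rm().
 */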

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        if (ad->hw.dcf_enabled)
                return 0;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        parser = &ice_fdir_parser;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_flow_parser *parser;
        struct ice_pf *pf = &ad->pf;

        if (ad->hw.dcf_enabled)
                return;

        parser = &ice_fdir_parser;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to generate dummy packet");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}
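
/*
 * Programming model, in brief: the same filter is serialized twice,
 * once as a programming descriptor via ice_fdir_get_prgm_desc() and
 * once as a dummy packet image written into the memzone reserved by
 * ice_fdir_setup(); ice_fdir_programming() then submits both through
 * the FDIR programming queue, and the same path handles both add and
 * delete.
 */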

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check whether a flow director filter with the given pattern exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry into hash table: %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table: %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}
1292
1293static int
1294ice_fdir_create_filter(struct ice_adapter *ad,
1295                       struct rte_flow *flow,
1296                       void *meta,
1297                       struct rte_flow_error *error)
1298{
1299        struct ice_pf *pf = &ad->pf;
1300        struct ice_fdir_filter_conf *filter = meta;
1301        struct ice_fdir_info *fdir_info = &pf->fdir;
1302        struct ice_fdir_filter_conf *entry, *node;
1303        struct ice_fdir_fltr_pattern key;
1304        bool is_tun;
1305        int ret;
1306
1307        ice_fdir_extract_fltr_key(&key, filter);
1308        node = ice_fdir_entry_lookup(fdir_info, &key);
1309        if (node) {
1310                rte_flow_error_set(error, EEXIST,
1311                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1312                                   "Rule already exists!");
1313                return -rte_errno;
1314        }
1315
1316        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1317        if (!entry) {
1318                rte_flow_error_set(error, ENOMEM,
1319                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1320                                   "Failed to allocate memory");
1321                return -rte_errno;
1322        }
1323
1324        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1325
1326        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
1327                                      filter->input_set_i, filter->input_set_o,
1328                                      filter->tunnel_type);
1329        if (ret) {
1330                rte_flow_error_set(error, -ret,
1331                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1332                                   "Profile configure failed.");
1333                goto free_entry;
1334        }
1335
1336        /* alloc counter for FDIR */
1337        if (filter->input.cnt_ena) {
1338                struct rte_flow_action_count *act_count = &filter->act_count;
1339
1340                filter->counter = ice_fdir_counter_alloc(pf,
1341                                                         act_count->shared,
1342                                                         act_count->id);
1343                if (!filter->counter) {
1344                        rte_flow_error_set(error, EINVAL,
1345                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1346                                        "Failed to alloc FDIR counter.");
1347                        goto free_entry;
1348                }
1349                filter->input.cnt_index = filter->counter->hw_index;
1350        }
1351
1352        ret = ice_fdir_add_del_filter(pf, filter, true);
1353        if (ret) {
1354                rte_flow_error_set(error, -ret,
1355                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1356                                   "Add filter rule failed.");
1357                goto free_counter;
1358        }
1359
1360        if (filter->mark_flag == 1)
1361                ice_fdir_rx_parsing_enable(ad, 1);
1362
1363        rte_memcpy(entry, filter, sizeof(*entry));
1364        ret = ice_fdir_entry_insert(pf, entry, &key);
1365        if (ret) {
1366                rte_flow_error_set(error, -ret,
1367                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1368                                   "Insert entry to table failed.");
1369                goto free_entry;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}
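/*
 * Editor's illustrative sketch, not part of the driver: one way an
 * application could reach ice_fdir_create_filter() through the generic
 * rte_flow API. The function name, addresses, queue index and mark ID
 * below are hypothetical, and ICE_FDIR_USAGE_EXAMPLE is never defined,
 * so the sketch is kept out of the build.
 */
#ifdef ICE_FDIR_USAGE_EXAMPLE
static struct rte_flow *
example_create_ipv4_udp_rule(uint16_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        /* Only fully-masked fields enter the FDIR input set (see
         * ice_fdir_parse_pattern() below), so mask exactly what should
         * be matched.
         */
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr.dst_addr = RTE_BE32(UINT32_MAX),
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr.dst_port = RTE_BE16(4789),
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr.dst_port = RTE_BE16(UINT16_MAX),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        /* Steer matches to queue 3 and tag them with FDIR ID 0x1234. */
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action_mark mark = { .id = 0x1234 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif /* ICE_FDIR_USAGE_EXAMPLE */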

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);

        if (filter->mark_flag == 1)
                ice_fdir_rx_parsing_enable(ad, 0);

        flow->rule = NULL;

        rte_free(filter);

        return 0;
}
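/*
 * Editor's illustrative sketch, not part of the driver: the matching
 * teardown path. rte_flow_destroy() dispatches to
 * ice_fdir_destroy_filter() above, which frees the counter, removes the
 * hardware filter and deletes the hash-table entry. Hypothetical code,
 * compiled out behind ICE_FDIR_USAGE_EXAMPLE.
 */
#ifdef ICE_FDIR_USAGE_EXAMPLE
static int
example_destroy_rule(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_error err;

        return rte_flow_destroy(port_id, flow, &err);
}
#endif /* ICE_FDIR_USAGE_EXAMPLE */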

static int
ice_fdir_query_count(struct ice_adapter *ad,
                      struct rte_flow *flow,
                      struct rte_flow_query_count *flow_stats,
                      struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32 bits latches the high 32 bits into a shadow
         * register. Reading the high 32 bits then returns the value held
         * in the shadow register, so the two reads form one coherent
         * 64-bit sample.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}
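/*
 * Editor's illustrative sketch, not part of the driver: querying the hit
 * counter above through rte_flow_query(). The rule must carry a COUNT
 * action, otherwise ice_fdir_query_count() rejects the query with EINVAL.
 * Hypothetical code, compiled out behind ICE_FDIR_USAGE_EXAMPLE.
 */
#ifdef ICE_FDIR_USAGE_EXAMPLE
static int
example_query_hits(uint16_t port_id, struct rte_flow *flow, uint64_t *hits)
{
        const struct rte_flow_action count_action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        /* Setting .reset asks the driver to zero the HW counter after
         * the read, which ice_fdir_query_count() honors.
         */
        struct rte_flow_query_count query = { .reset = 1 };
        struct rte_flow_error err;
        int ret;

        ret = rte_flow_query(port_id, flow, &count_action, &query, &err);
        if (ret == 0 && query.hits_set)
                *hits = query.hits;
        return ret;
}
#endif /* ICE_FDIR_USAGE_EXAMPLE */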

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check that the queue indices of the region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
             (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size must be one of 2, 4, 8, 16, 32, "
                                   "64 or 128 queues, and the total number of queues "
                                   "must not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        /* q_region encodes log2(queue_num); queue_num is a power of two */
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}
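/*
 * Editor's illustrative sketch, not part of the driver: an RSS action that
 * passes every check above, i.e. a queue region of contiguous indices
 * whose size is a power of two no larger than ICE_FDIR_MAX_QREGION_SIZE
 * and whose last index stays below nb_rx_queues. The names and queue
 * numbers are hypothetical; compiled out behind ICE_FDIR_USAGE_EXAMPLE.
 */
#ifdef ICE_FDIR_USAGE_EXAMPLE
static const uint16_t example_qregion_queues[] = { 4, 5, 6, 7 };
static const struct rte_flow_action_rss example_qregion = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
        .queue_num = RTE_DIM(example_qregion_queues),
        .queue = example_qregion_queues,
};
#endif /* ICE_FDIR_USAGE_EXAMPLE */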

static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        uint32_t counter_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;
                        filter->mark_flag = 1;
                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                                sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        if (dest_num + mark_num + counter_num == 0) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Empty action");
                return -rte_errno;
        }

        /* If no destination action was given ("mark/count only" case),
         * default to PASSTHRU mode.
         */
        if (dest_num == 0)
                filter->input.dest_ctl =
                        ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;

        return 0;
}
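/*
 * Editor's illustrative sketch, not part of the driver: a "mark/count
 * only" action list. With no QUEUE/DROP/PASSTHRU/RSS action present,
 * ice_fdir_parse_action() above falls back to
 * ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER (PASSTHRU). Hypothetical
 * names, compiled out behind ICE_FDIR_USAGE_EXAMPLE.
 */
#ifdef ICE_FDIR_USAGE_EXAMPLE
static const struct rte_flow_action_mark example_mark = { .id = 42 };
static const struct rte_flow_action_count example_count = { .id = 0 };
static const struct rte_flow_action example_mark_count_only[] = {
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_mark },
        { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &example_count },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* ICE_FDIR_USAGE_EXAMPLE */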

static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
                       const struct rte_flow_item pattern[],
                       struct rte_flow_error *error,
                       struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
        enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
                                        *ipv6_frag_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        uint64_t input_set_i = ICE_INSET_NONE; /* only for tunnel inner */
        uint64_t input_set_o = ICE_INSET_NONE; /* non-tunnel and tunnel outer */
        uint64_t *input_set;
        uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };
        uint32_t vtc_flow_cpu;
        uint16_t ether_type;
        enum rte_flow_item_type next_type;
        bool is_outer = true;
        struct ice_fdir_extra *p_ext_data;
        struct ice_fdir_v4 *p_v4 = NULL;
        struct ice_fdir_v6 *p_v6 = NULL;

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
                /* To align with shared code behavior, save GTPU outer
                 * fields in the inner struct.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_GTPU ||
                    item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
                        is_outer = false;
                }
        }

        /* This loop parses the flow pattern and distinguishes non-tunnel
         * from tunnel flows. input_set_i is used only for the inner part.
         */
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                item_type = item->type;

                if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    item_type ==
                                    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Range not supported");
                        return -rte_errno;
                }

                input_set = (tunnel_type && !is_outer) ?
                            &input_set_i : &input_set_o;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        if (!(eth_spec && eth_mask))
                                break;

                        if (!rte_is_zero_ether_addr(&eth_mask->dst))
                                *input_set |= ICE_INSET_DMAC;
                        if (!rte_is_zero_ether_addr(&eth_mask->src))
                                *input_set |= ICE_INSET_SMAC;

                        next_type = (item + 1)->type;
                        /* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
                        if (eth_mask->type == RTE_BE16(0xffff) &&
                            next_type == RTE_FLOW_ITEM_TYPE_END) {
                                *input_set |= ICE_INSET_ETHERTYPE;
                                ether_type = rte_be_to_cpu_16(eth_spec->type);

                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                    ether_type == RTE_ETHER_TYPE_IPV6) {
                                        rte_flow_error_set(error, EINVAL,
                                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                                           item,
                                                           "Unsupported ether_type.");
                                        return -rte_errno;
                                }
                        }

                        p_ext_data = (tunnel_type && is_outer) ?
                                     &filter->input.ext_data_outer :
                                     &filter->input.ext_data;
                        rte_memcpy(&p_ext_data->src_mac,
                                   &eth_spec->src, RTE_ETHER_ADDR_LEN);
                        rte_memcpy(&p_ext_data->dst_mac,
                                   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
                        rte_memcpy(&p_ext_data->ether_type,
                                   &eth_spec->type, sizeof(eth_spec->type));
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_last = item->last;
                        ipv4_mask = item->mask;
                        p_v4 = (tunnel_type && is_outer) ?
                               &filter->input.ip_outer.v4 :
                               &filter->input.ip.v4;

                        if (!(ipv4_spec && ipv4_mask))
                                break;

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_last &&
                            (ipv4_last->hdr.version_ihl ||
                             ipv4_last->hdr.type_of_service ||
                             ipv4_last->hdr.time_to_live ||
                             ipv4_last->hdr.total_length ||
                             ipv4_last->hdr.next_proto_id ||
                             ipv4_last->hdr.hdr_checksum ||
                             ipv4_last->hdr.src_addr ||
                             ipv4_last->hdr.dst_addr)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 last.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                *input_set |= ICE_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                *input_set |= ICE_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                *input_set |= ICE_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                *input_set |= ICE_INSET_IPV4_PROTO;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                *input_set |= ICE_INSET_IPV4_TOS;

                        p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
                        p_v4->src_ip = ipv4_spec->hdr.src_addr;
                        p_v4->ttl = ipv4_spec->hdr.time_to_live;
                        p_v4->proto = ipv4_spec->hdr.next_proto_id;
                        p_v4->tos = ipv4_spec->hdr.type_of_service;

                        /* fragmented IPv4:
                         * spec is 0x2000, mask is 0x2000
                         */
                        if (ipv4_spec->hdr.fragment_offset ==
                            rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
                            ipv4_mask->hdr.fragment_offset ==
                            rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
                                /* All IPv4 fragment packets share the same
                                 * ethertype; if the spec and mask are valid,
                                 * add the ethertype to the input set.
                                 */
                                flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
                                *input_set |= ICE_INSET_ETHERTYPE;
                                input_set_o |= ICE_INSET_ETHERTYPE;
                        } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;
                        p_v6 = (tunnel_type && is_outer) ?
                               &filter->input.ip_outer.v6 :
                               &filter->input.ip.v6;

                        if (!(ipv6_spec && ipv6_mask))
                                break;

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.src_addr)))
                                *input_set |= ICE_INSET_IPV6_SRC;
                        if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                *input_set |= ICE_INSET_IPV6_DST;

                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                            == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                                *input_set |= ICE_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                *input_set |= ICE_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                *input_set |= ICE_INSET_IPV6_HOP_LIMIT;

                        rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
                        rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
                        vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
                        p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
                        p_v6->proto = ipv6_spec->hdr.proto;
                        p_v6->hlim = ipv6_spec->hdr.hop_limits;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
                        flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
                        ipv6_frag_spec = item->spec;
                        ipv6_frag_mask = item->mask;

                        if (!(ipv6_frag_spec && ipv6_frag_mask))
                                break;

                        /* fragmented IPv6:
                         * spec is 0x1, mask is 0x1
                         */
                        if (ipv6_frag_spec->hdr.frag_data ==
                            rte_cpu_to_be_16(1) &&
                            ipv6_frag_mask->hdr.frag_data ==
                            rte_cpu_to_be_16(1)) {
                                /* All IPv6 fragment packets share the same
                                 * ethertype; if the spec and mask are valid,
                                 * add the ethertype to the input set.
                                 */
                                *input_set |= ICE_INSET_ETHERTYPE;
                                input_set_o |= ICE_INSET_ETHERTYPE;
                        } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (!(tcp_spec && tcp_mask))
                                break;

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port == UINT16_MAX)
                                *input_set |= ICE_INSET_TCP_SRC_PORT;
                        if (tcp_mask->hdr.dst_port == UINT16_MAX)
                                *input_set |= ICE_INSET_TCP_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                assert(p_v4);
                                p_v4->dst_port = tcp_spec->hdr.dst_port;
                                p_v4->src_port = tcp_spec->hdr.src_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                assert(p_v6);
                                p_v6->dst_port = tcp_spec->hdr.dst_port;
                                p_v6->src_port = tcp_spec->hdr.src_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        l4 = RTE_FLOW_ITEM_TYPE_UDP;
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (!(udp_spec && udp_mask))
                                break;

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port == UINT16_MAX)
                                *input_set |= ICE_INSET_UDP_SRC_PORT;
                        if (udp_mask->hdr.dst_port == UINT16_MAX)
                                *input_set |= ICE_INSET_UDP_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                assert(p_v4);
                                p_v4->dst_port = udp_spec->hdr.dst_port;
                                p_v4->src_port = udp_spec->hdr.src_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                assert(p_v6);
                                p_v6->src_port = udp_spec->hdr.src_port;
                                p_v6->dst_port = udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        if (!(sctp_spec && sctp_mask))
                                break;

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port == UINT16_MAX)
                                *input_set |= ICE_INSET_SCTP_SRC_PORT;
                        if (sctp_mask->hdr.dst_port == UINT16_MAX)
                                *input_set |= ICE_INSET_SCTP_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                assert(p_v4);
                                p_v4->dst_port = sctp_spec->hdr.dst_port;
                                p_v4->src_port = sctp_spec->hdr.src_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                assert(p_v6);
                                p_v6->dst_port = sctp_spec->hdr.dst_port;
                                p_v6->src_port = sctp_spec->hdr.src_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        l3 = RTE_FLOW_ITEM_TYPE_END;
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        is_outer = false;

                        if (!(vxlan_spec && vxlan_mask))
                                break;

                        if (vxlan_mask->hdr.vx_flags) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid vxlan field");
                                return -rte_errno;
                        }

                        if (vxlan_mask->hdr.vx_vni)
                                *input_set |= ICE_INSET_VXLAN_VNI;

                        filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;

                        break;
                case RTE_FLOW_ITEM_TYPE_GTPU:
                        l3 = RTE_FLOW_ITEM_TYPE_END;
                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        if (!(gtp_spec && gtp_mask))
                                break;

                        if (gtp_mask->v_pt_rsv_flags ||
                            gtp_mask->msg_type ||
                            gtp_mask->msg_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid GTP mask");
                                return -rte_errno;
                        }

                        if (gtp_mask->teid == UINT32_MAX)
                                input_set_o |= ICE_INSET_GTPU_TEID;

                        filter->input.gtpu_data.teid = gtp_spec->teid;
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        if (!(gtp_psc_spec && gtp_psc_mask))
                                break;

                        if (gtp_psc_mask->qfi == UINT8_MAX)
                                input_set_o |= ICE_INSET_GTPU_QFI;

                        filter->input.gtpu_data.qfi =
                                gtp_psc_spec->qfi;
                        break;
                case RTE_FLOW_ITEM_TYPE_ESP:
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
                            l4 == RTE_FLOW_ITEM_TYPE_UDP)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
                                 l4 == RTE_FLOW_ITEM_TYPE_UDP)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
                                 l4 == RTE_FLOW_ITEM_TYPE_END)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
                                 l4 == RTE_FLOW_ITEM_TYPE_END)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

                        esp_spec = item->spec;
                        esp_mask = item->mask;

                        if (!(esp_spec && esp_mask))
                                break;

                        if (esp_mask->hdr.spi == UINT32_MAX) {
                                if (l4 == RTE_FLOW_ITEM_TYPE_UDP)
                                        *input_set |= ICE_INSET_NAT_T_ESP_SPI;
                                else
                                        *input_set |= ICE_INSET_ESP_SPI;
                        }

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                filter->input.ip.v4.sec_parm_idx =
                                        esp_spec->hdr.spi;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                filter->input.ip.v6.sec_parm_idx =
                                        esp_spec->hdr.spi;
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;

        filter->tunnel_type = tunnel_type;
        filter->input.flow_type = flow_type;
        filter->input_set_o = input_set_o;
        filter->input_set_i = input_set_i;

        return 0;
}
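/*
 * Editor's illustrative sketch, not part of the driver: the IPv4 fragment
 * match parsed above. Setting the MF bit in both spec and mask of
 * fragment_offset selects ICE_FLTR_PTYPE_FRAG_IPV4 and pulls the
 * ethertype into the outer input set. Hypothetical names, compiled out
 * behind ICE_FDIR_USAGE_EXAMPLE.
 */
#ifdef ICE_FDIR_USAGE_EXAMPLE
static const struct rte_flow_item_ipv4 example_frag_spec = {
        .hdr.fragment_offset = RTE_BE16(RTE_IPV4_HDR_MF_FLAG),
};
static const struct rte_flow_item_ipv4 example_frag_mask = {
        .hdr.fragment_offset = RTE_BE16(RTE_IPV4_HDR_MF_FLAG),
};
#endif /* ICE_FDIR_USAGE_EXAMPLE */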

static int
ice_fdir_parse(struct ice_adapter *ad,
               struct ice_pattern_match_item *array,
               uint32_t array_len,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               uint32_t priority __rte_unused,
               void **meta,
               struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
        struct ice_pattern_match_item *item = NULL;
        uint64_t input_set;
        int ret;

        memset(filter, 0, sizeof(*filter));
        item = ice_search_pattern_match_item(ad, pattern, array, array_len,
                                             error);
        if (!item)
                return -rte_errno;

        ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
        if (ret)
                goto error;
        input_set = filter->input_set_o | filter->input_set_i;
        if (!input_set ||
            (filter->input_set_o &
             ~(item->input_set_mask_o | ICE_INSET_ETHERTYPE)) ||
            (filter->input_set_i & ~item->input_set_mask_i)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                   pattern,
                                   "Invalid input set");
                ret = -rte_errno;
                goto error;
        }

        ret = ice_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;
error:
        rte_free(item);
        return ret;
}

static struct ice_flow_parser ice_fdir_parser = {
        .engine = &ice_fdir_engine,
        .array = ice_fdir_pattern_list,
        .array_len = RTE_DIM(ice_fdir_pattern_list),
        .parse_pattern_action = ice_fdir_parse,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
        ice_register_flow_engine(&ice_fdir_engine);
}