dpdk/drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};
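
/*
 * Note (explanatory, not functional): assigning to the whole "attr" word,
 * e.g. "attr->attr = 0;", clears all of the bitfields above at once.
 * flow_dv_attr_init() below relies on this to reset the accumulated
 * L3/L4 flags whenever a tunnel item is encountered.
 */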

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than tunnel
 * mode. In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, it means this dev_flow is the
         * suffix flow and its layer flags were set by the prefix flow. Use
         * the layer flags from the prefix flow, as the suffix flow may not
         * contain the user-defined items since the flow was split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                case RTE_FLOW_ITEM_TYPE_GTP:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/*
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
                     mlx5_list_match_cb cb_match,
                     mlx5_list_remove_cb cb_remove,
                     mlx5_list_clone_cb cb_clone,
                     mlx5_list_clone_free_cb cb_clone_free,
                     struct rte_flow_error *error)
{
        struct mlx5_hlist *hl;
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];

        hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
        hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
                        ctx, cb_create, cb_match, cb_remove, cb_clone,
                        cb_clone_free);
        if (!hl) {
                DRV_LOG(ERR, "%s hash creation failed", name);
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate resource memory");
                return NULL;
        }
        if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST)) {
                mlx5_hlist_destroy(hl);
                hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        }
        return hl;
}
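
/*
 * Explanatory note: if two lcores race through flow_dv_hlist_prepare()
 * while *phl is still NULL, both may create a candidate hash list, but
 * the compare-and-swap publishes only one of them. The loser destroys
 * its own copy and reloads the published pointer, so all callers end up
 * sharing a single hash list instance.
 */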

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
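
/*
 * Worked example (illustrative values): with vlan_tci = 0x2064
 * (PCP = 1, DEI = 0, VID = 0x064), OF_SET_VLAN_PCP with vlan_pcp = 5
 * yields (0x2064 & ~0xe000) | (5 << 13) = 0xa064, and a subsequent
 * OF_SET_VLAN_VID with vlan_vid = RTE_BE16(0x123) yields
 * (0xa064 & ~0x0fff) | 0x123 = 0xa123.
 */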

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   The converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
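
/*
 * Worked example (illustrative values): for data = {0x12, 0x34, 0x56}
 * and size = 3, the 16-bit load yields 0x1234 and the trailing byte is
 * appended, so the function returns (0x1234 << 8) | 0x56 = 0x123456.
 */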

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields must be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in the source field to fill the
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
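
/*
 * Worked example (illustrative values): assume a 4-byte field whose
 * fetched mask is 0x00fff000. Then off_b = rte_bsf32(0x00fff000) = 12
 * and size_b = 32 - 12 - __builtin_clz(0x00fff000) = 32 - 12 - 8 = 12,
 * so a single command with offset 12 and length 12 is emitted, and for
 * SET/ADD the data is shifted right by 12 bits before being written.
 */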

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using the increment
                 * operation, add UINT32_MAX to Y X times: each addition
                 * of UINT32_MAX decrements Y by 1 (modulo 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
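
/*
 * Worked example (illustrative values): for DEC_TCP_SEQ with a
 * configuration value of 3, value = 3 * UINT32_MAX truncates to
 * (uint32_t)0xfffffffd, and adding 0xfffffffd modulo 2^32 is exactly
 * a decrement by 3.
 */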

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using the increment
                 * operation, add UINT32_MAX to Y X times: each addition
                 * of UINT32_MAX decrements Y by 1 (modulo 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->sh->config.dv_xmeta_en !=
                            MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
                } else {
                        reg_dst.offset = 0;
                        mask = rte_cpu_to_be_32(reg_c0);
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
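
/*
 * Worked example (hypothetical mask value): with dv_regc0_mask =
 * 0xffff0000 and conf->dst == REG_C_0, reg_dst.offset becomes
 * rte_bsf32(0xffff0000) = 16 and mask becomes RTE_BE32(0x0000ffff),
 * so the copy targets the upper 16 bits of reg_c[0] and leaves the
 * bits outside the reported mask untouched.
 */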

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
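
/*
 * Explanatory note on the double byte swap above: "data" and "mask" are
 * kept in big-endian form, so rte_cpu_to_be_32(rte_cpu_to_be_32(data)
 * << shl_c0) brings the value back to host order (a byte swap on
 * little-endian hosts), shifts it into the reg_c[0] bit range reported
 * by dv_regc0_mask, and restores big-endian order. The same pattern
 * appears in flow_dv_convert_action_set_meta() below.
 */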

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bit offset in IPv6 is not byte aligned,
         * rdma-core accepts only byte-aligned DSCP bits (bits 0 to 5),
         * to stay compatible with IPv4. Hence there is no need to shift
         * the bits in the IPv6 case, as rdma-core requires a byte-aligned
         * value.
         */
1365        ipv6.hdr.vtc_flow = conf->dscp;
1366        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1367        item.spec = &ipv6;
1368        item.mask = &ipv6_mask;
1369        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1370                                             MLX5_MODIFICATION_TYPE_SET, error);
1371}
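
    /*
     * Worked example for the shift above: RTE_IPV6_HDR_DSCP_MASK is
     * 0x0fc00000 (DSCP sits in bits 27:22 of vtc_flow), so ">> 22"
     * produces the byte-aligned 6-bit mask 0x3f that rdma-core expects,
     * matching the IPv4 layout.
     */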
1372
1373static int
1374mlx5_flow_item_field_width(struct rte_eth_dev *dev,
1375                           enum rte_flow_field_id field, int inherit,
1376                           const struct rte_flow_attr *attr,
1377                           struct rte_flow_error *error)
1378{
1379        struct mlx5_priv *priv = dev->data->dev_private;
1380
1381        switch (field) {
1382        case RTE_FLOW_FIELD_START:
1383                return 32;
1384        case RTE_FLOW_FIELD_MAC_DST:
1385        case RTE_FLOW_FIELD_MAC_SRC:
1386                return 48;
1387        case RTE_FLOW_FIELD_VLAN_TYPE:
1388                return 16;
1389        case RTE_FLOW_FIELD_VLAN_ID:
1390                return 12;
1391        case RTE_FLOW_FIELD_MAC_TYPE:
1392                return 16;
1393        case RTE_FLOW_FIELD_IPV4_DSCP:
1394                return 6;
1395        case RTE_FLOW_FIELD_IPV4_TTL:
1396                return 8;
1397        case RTE_FLOW_FIELD_IPV4_SRC:
1398        case RTE_FLOW_FIELD_IPV4_DST:
1399                return 32;
1400        case RTE_FLOW_FIELD_IPV6_DSCP:
1401                return 6;
1402        case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1403                return 8;
1404        case RTE_FLOW_FIELD_IPV6_SRC:
1405        case RTE_FLOW_FIELD_IPV6_DST:
1406                return 128;
1407        case RTE_FLOW_FIELD_TCP_PORT_SRC:
1408        case RTE_FLOW_FIELD_TCP_PORT_DST:
1409                return 16;
1410        case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1411        case RTE_FLOW_FIELD_TCP_ACK_NUM:
1412                return 32;
1413        case RTE_FLOW_FIELD_TCP_FLAGS:
1414                return 9;
1415        case RTE_FLOW_FIELD_UDP_PORT_SRC:
1416        case RTE_FLOW_FIELD_UDP_PORT_DST:
1417                return 16;
1418        case RTE_FLOW_FIELD_VXLAN_VNI:
1419        case RTE_FLOW_FIELD_GENEVE_VNI:
1420                return 24;
1421        case RTE_FLOW_FIELD_GTP_TEID:
1422        case RTE_FLOW_FIELD_TAG:
1423                return 32;
1424        case RTE_FLOW_FIELD_MARK:
1425                return __builtin_popcount(priv->sh->dv_mark_mask);
1426        case RTE_FLOW_FIELD_META:
1427                return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
1428                        __builtin_popcount(priv->sh->dv_meta_mask) : 32;
1429        case RTE_FLOW_FIELD_POINTER:
1430        case RTE_FLOW_FIELD_VALUE:
1431                return inherit < 0 ? 0 : inherit;
1432        case RTE_FLOW_FIELD_IPV4_ECN:
1433        case RTE_FLOW_FIELD_IPV6_ECN:
1434                return 2;
1435        default:
1436                MLX5_ASSERT(false);
1437        }
1438        return 0;
1439}
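
    /*
     * Illustrative note: for MARK and META the usable width depends on
     * the register layout. E.g., assuming dv_mark_mask == 0x00ffffff,
     * __builtin_popcount() reports a 24-bit MARK field; when META maps
     * to a register other than REG_C_0, the full 32 bits are reported.
     */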
1440
1441static void
1442mlx5_flow_field_id_to_modify_info
1443                (const struct rte_flow_action_modify_data *data,
1444                 struct field_modify_info *info, uint32_t *mask,
1445                 uint32_t width, struct rte_eth_dev *dev,
1446                 const struct rte_flow_attr *attr, struct rte_flow_error *error)
1447{
1448        struct mlx5_priv *priv = dev->data->dev_private;
1449        uint32_t idx = 0;
1450        uint32_t off = 0;
1451
1452        switch (data->field) {
1453        case RTE_FLOW_FIELD_START:
1454                /* not supported yet */
1455                MLX5_ASSERT(false);
1456                break;
1457        case RTE_FLOW_FIELD_MAC_DST:
1458                off = data->offset > 16 ? data->offset - 16 : 0;
1459                if (mask) {
1460                        if (data->offset < 16) {
1461                                info[idx] = (struct field_modify_info){2, 4,
1462                                                MLX5_MODI_OUT_DMAC_15_0};
1463                                if (width < 16) {
1464                                        mask[1] = rte_cpu_to_be_16(0xffff >>
1465                                                                 (16 - width));
1466                                        width = 0;
1467                                } else {
1468                                        mask[1] = RTE_BE16(0xffff);
1469                                        width -= 16;
1470                                }
1471                                if (!width)
1472                                        break;
1473                                ++idx;
1474                        }
1475                        info[idx] = (struct field_modify_info){4, 0,
1476                                                MLX5_MODI_OUT_DMAC_47_16};
1477                        mask[0] = rte_cpu_to_be_32((0xffffffff >>
1478                                                    (32 - width)) << off);
1479                } else {
1480                        if (data->offset < 16)
1481                                info[idx++] = (struct field_modify_info){2, 0,
1482                                                MLX5_MODI_OUT_DMAC_15_0};
1483                        info[idx] = (struct field_modify_info){4, off,
1484                                                MLX5_MODI_OUT_DMAC_47_16};
1485                }
1486                break;
1487        case RTE_FLOW_FIELD_MAC_SRC:
1488                off = data->offset > 16 ? data->offset - 16 : 0;
1489                if (mask) {
1490                        if (data->offset < 16) {
1491                                info[idx] = (struct field_modify_info){2, 4,
1492                                                MLX5_MODI_OUT_SMAC_15_0};
1493                                if (width < 16) {
1494                                        mask[1] = rte_cpu_to_be_16(0xffff >>
1495                                                                 (16 - width));
1496                                        width = 0;
1497                                } else {
1498                                        mask[1] = RTE_BE16(0xffff);
1499                                        width -= 16;
1500                                }
1501                                if (!width)
1502                                        break;
1503                                ++idx;
1504                        }
1505                        info[idx] = (struct field_modify_info){4, 0,
1506                                                MLX5_MODI_OUT_SMAC_47_16};
1507                        mask[0] = rte_cpu_to_be_32((0xffffffff >>
1508                                                    (32 - width)) << off);
1509                } else {
1510                        if (data->offset < 16)
1511                                info[idx++] = (struct field_modify_info){2, 0,
1512                                                MLX5_MODI_OUT_SMAC_15_0};
1513                        info[idx] = (struct field_modify_info){4, off,
1514                                                MLX5_MODI_OUT_SMAC_47_16};
1515                }
1516                break;
1517        case RTE_FLOW_FIELD_VLAN_TYPE:
1518                /* not supported yet */
1519                break;
1520        case RTE_FLOW_FIELD_VLAN_ID:
1521                info[idx] = (struct field_modify_info){2, 0,
1522                                        MLX5_MODI_OUT_FIRST_VID};
1523                if (mask)
1524                        mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1525                break;
1526        case RTE_FLOW_FIELD_MAC_TYPE:
1527                info[idx] = (struct field_modify_info){2, 0,
1528                                        MLX5_MODI_OUT_ETHERTYPE};
1529                if (mask)
1530                        mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1531                break;
1532        case RTE_FLOW_FIELD_IPV4_DSCP:
1533                info[idx] = (struct field_modify_info){1, 0,
1534                                        MLX5_MODI_OUT_IP_DSCP};
1535                if (mask)
1536                        mask[idx] = 0x3f >> (6 - width);
1537                break;
1538        case RTE_FLOW_FIELD_IPV4_TTL:
1539                info[idx] = (struct field_modify_info){1, 0,
1540                                        MLX5_MODI_OUT_IPV4_TTL};
1541                if (mask)
1542                        mask[idx] = 0xff >> (8 - width);
1543                break;
1544        case RTE_FLOW_FIELD_IPV4_SRC:
1545                info[idx] = (struct field_modify_info){4, 0,
1546                                        MLX5_MODI_OUT_SIPV4};
1547                if (mask)
1548                        mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1549                                                     (32 - width));
1550                break;
1551        case RTE_FLOW_FIELD_IPV4_DST:
1552                info[idx] = (struct field_modify_info){4, 0,
1553                                        MLX5_MODI_OUT_DIPV4};
1554                if (mask)
1555                        mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1556                                                     (32 - width));
1557                break;
1558        case RTE_FLOW_FIELD_IPV6_DSCP:
1559                info[idx] = (struct field_modify_info){1, 0,
1560                                        MLX5_MODI_OUT_IP_DSCP};
1561                if (mask)
1562                        mask[idx] = 0x3f >> (6 - width);
1563                break;
1564        case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1565                info[idx] = (struct field_modify_info){1, 0,
1566                                        MLX5_MODI_OUT_IPV6_HOPLIMIT};
1567                if (mask)
1568                        mask[idx] = 0xff >> (8 - width);
1569                break;
1570        case RTE_FLOW_FIELD_IPV6_SRC:
1571                if (mask) {
1572                        if (data->offset < 32) {
1573                                info[idx] = (struct field_modify_info){4, 12,
1574                                                MLX5_MODI_OUT_SIPV6_31_0};
1575                                if (width < 32) {
1576                                        mask[3] =
1577                                                rte_cpu_to_be_32(0xffffffff >>
1578                                                                 (32 - width));
1579                                        width = 0;
1580                                } else {
1581                                        mask[3] = RTE_BE32(0xffffffff);
1582                                        width -= 32;
1583                                }
1584                                if (!width)
1585                                        break;
1586                                ++idx;
1587                        }
1588                        if (data->offset < 64) {
1589                                info[idx] = (struct field_modify_info){4, 8,
1590                                                MLX5_MODI_OUT_SIPV6_63_32};
1591                                if (width < 32) {
1592                                        mask[2] =
1593                                                rte_cpu_to_be_32(0xffffffff >>
1594                                                                 (32 - width));
1595                                        width = 0;
1596                                } else {
1597                                        mask[2] = RTE_BE32(0xffffffff);
1598                                        width -= 32;
1599                                }
1600                                if (!width)
1601                                        break;
1602                                ++idx;
1603                        }
1604                        if (data->offset < 96) {
1605                                info[idx] = (struct field_modify_info){4, 4,
1606                                                MLX5_MODI_OUT_SIPV6_95_64};
1607                                if (width < 32) {
1608                                        mask[1] =
1609                                                rte_cpu_to_be_32(0xffffffff >>
1610                                                                 (32 - width));
1611                                        width = 0;
1612                                } else {
1613                                        mask[1] = RTE_BE32(0xffffffff);
1614                                        width -= 32;
1615                                }
1616                                if (!width)
1617                                        break;
1618                                ++idx;
1619                        }
1620                        info[idx] = (struct field_modify_info){4, 0,
1621                                                MLX5_MODI_OUT_SIPV6_127_96};
1622                        mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1623                } else {
1624                        if (data->offset < 32)
1625                                info[idx++] = (struct field_modify_info){4, 0,
1626                                                MLX5_MODI_OUT_SIPV6_31_0};
1627                        if (data->offset < 64)
1628                                info[idx++] = (struct field_modify_info){4, 0,
1629                                                MLX5_MODI_OUT_SIPV6_63_32};
1630                        if (data->offset < 96)
1631                                info[idx++] = (struct field_modify_info){4, 0,
1632                                                MLX5_MODI_OUT_SIPV6_95_64};
1633                        if (data->offset < 128)
1634                                info[idx++] = (struct field_modify_info){4, 0,
1635                                                MLX5_MODI_OUT_SIPV6_127_96};
1636                }
1637                break;
1638        case RTE_FLOW_FIELD_IPV6_DST:
1639                if (mask) {
1640                        if (data->offset < 32) {
1641                                info[idx] = (struct field_modify_info){4, 12,
1642                                                MLX5_MODI_OUT_DIPV6_31_0};
1643                                if (width < 32) {
1644                                        mask[3] =
1645                                                rte_cpu_to_be_32(0xffffffff >>
1646                                                                 (32 - width));
1647                                        width = 0;
1648                                } else {
1649                                        mask[3] = RTE_BE32(0xffffffff);
1650                                        width -= 32;
1651                                }
1652                                if (!width)
1653                                        break;
1654                                ++idx;
1655                        }
1656                        if (data->offset < 64) {
1657                                info[idx] = (struct field_modify_info){4, 8,
1658                                                MLX5_MODI_OUT_DIPV6_63_32};
1659                                if (width < 32) {
1660                                        mask[2] =
1661                                                rte_cpu_to_be_32(0xffffffff >>
1662                                                                 (32 - width));
1663                                        width = 0;
1664                                } else {
1665                                        mask[2] = RTE_BE32(0xffffffff);
1666                                        width -= 32;
1667                                }
1668                                if (!width)
1669                                        break;
1670                                ++idx;
1671                        }
1672                        if (data->offset < 96) {
1673                                info[idx] = (struct field_modify_info){4, 4,
1674                                                MLX5_MODI_OUT_DIPV6_95_64};
1675                                if (width < 32) {
1676                                        mask[1] =
1677                                                rte_cpu_to_be_32(0xffffffff >>
1678                                                                 (32 - width));
1679                                        width = 0;
1680                                } else {
1681                                        mask[1] = RTE_BE32(0xffffffff);
1682                                        width -= 32;
1683                                }
1684                                if (!width)
1685                                        break;
1686                                ++idx;
1687                        }
1688                        info[idx] = (struct field_modify_info){4, 0,
1689                                                MLX5_MODI_OUT_DIPV6_127_96};
1690                        mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1691                } else {
1692                        if (data->offset < 32)
1693                                info[idx++] = (struct field_modify_info){4, 0,
1694                                                MLX5_MODI_OUT_DIPV6_31_0};
1695                        if (data->offset < 64)
1696                                info[idx++] = (struct field_modify_info){4, 0,
1697                                                MLX5_MODI_OUT_DIPV6_63_32};
1698                        if (data->offset < 96)
1699                                info[idx++] = (struct field_modify_info){4, 0,
1700                                                MLX5_MODI_OUT_DIPV6_95_64};
1701                        if (data->offset < 128)
1702                                info[idx++] = (struct field_modify_info){4, 0,
1703                                                MLX5_MODI_OUT_DIPV6_127_96};
1704                }
1705                break;
1706        case RTE_FLOW_FIELD_TCP_PORT_SRC:
1707                info[idx] = (struct field_modify_info){2, 0,
1708                                        MLX5_MODI_OUT_TCP_SPORT};
1709                if (mask)
1710                        mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1711                break;
1712        case RTE_FLOW_FIELD_TCP_PORT_DST:
1713                info[idx] = (struct field_modify_info){2, 0,
1714                                        MLX5_MODI_OUT_TCP_DPORT};
1715                if (mask)
1716                        mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1717                break;
1718        case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1719                info[idx] = (struct field_modify_info){4, 0,
1720                                        MLX5_MODI_OUT_TCP_SEQ_NUM};
1721                if (mask)
1722                        mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1723                                                     (32 - width));
1724                break;
1725        case RTE_FLOW_FIELD_TCP_ACK_NUM:
1726                info[idx] = (struct field_modify_info){4, 0,
1727                                        MLX5_MODI_OUT_TCP_ACK_NUM};
1728                if (mask)
1729                        mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1730                                                     (32 - width));
1731                break;
1732        case RTE_FLOW_FIELD_TCP_FLAGS:
1733                info[idx] = (struct field_modify_info){2, 0,
1734                                        MLX5_MODI_OUT_TCP_FLAGS};
1735                if (mask)
1736                        mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1737                break;
1738        case RTE_FLOW_FIELD_UDP_PORT_SRC:
1739                info[idx] = (struct field_modify_info){2, 0,
1740                                        MLX5_MODI_OUT_UDP_SPORT};
1741                if (mask)
1742                        mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1743                break;
1744        case RTE_FLOW_FIELD_UDP_PORT_DST:
1745                info[idx] = (struct field_modify_info){2, 0,
1746                                        MLX5_MODI_OUT_UDP_DPORT};
1747                if (mask)
1748                        mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1749                break;
1750        case RTE_FLOW_FIELD_VXLAN_VNI:
1751                /* not supported yet */
1752                break;
1753        case RTE_FLOW_FIELD_GENEVE_VNI:
1754                /* not supported yet */
1755                break;
1756        case RTE_FLOW_FIELD_GTP_TEID:
1757                info[idx] = (struct field_modify_info){4, 0,
1758                                        MLX5_MODI_GTP_TEID};
1759                if (mask)
1760                        mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1761                                                     (32 - width));
1762                break;
1763        case RTE_FLOW_FIELD_TAG:
1764                {
1765                        int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1766                                                   data->level, error);
1767                        if (reg < 0)
1768                                return;
1769                        MLX5_ASSERT(reg != REG_NON);
1770                        MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1771                        info[idx] = (struct field_modify_info){4, 0,
1772                                                reg_to_field[reg]};
1773                        if (mask)
1774                                mask[idx] =
1775                                        rte_cpu_to_be_32(0xffffffff >>
1776                                                         (32 - width));
1777                }
1778                break;
1779        case RTE_FLOW_FIELD_MARK:
1780                {
1781                        uint32_t mark_mask = priv->sh->dv_mark_mask;
1782                        uint32_t mark_count = __builtin_popcount(mark_mask);
1783                        int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1784                                                       0, error);
1785                        if (reg < 0)
1786                                return;
1787                        MLX5_ASSERT(reg != REG_NON);
1788                        MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1789                        info[idx] = (struct field_modify_info){4, 0,
1790                                                reg_to_field[reg]};
1791                        if (mask)
1792                                mask[idx] = rte_cpu_to_be_32((mark_mask >>
1793                                         (mark_count - width)) & mark_mask);
1794                }
1795                break;
1796        case RTE_FLOW_FIELD_META:
1797                {
1798                        uint32_t meta_mask = priv->sh->dv_meta_mask;
1799                        uint32_t meta_count = __builtin_popcount(meta_mask);
1800                        int reg = flow_dv_get_metadata_reg(dev, attr, error);
1801                        if (reg < 0)
1802                                return;
1803                        MLX5_ASSERT(reg != REG_NON);
1804                        MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1805                        info[idx] = (struct field_modify_info){4, 0,
1806                                                reg_to_field[reg]};
1807                        if (mask)
1808                                mask[idx] = rte_cpu_to_be_32((meta_mask >>
1809                                        (meta_count - width)) & meta_mask);
1810                }
1811                break;
1812        case RTE_FLOW_FIELD_IPV4_ECN:
1813        case RTE_FLOW_FIELD_IPV6_ECN:
1814                info[idx] = (struct field_modify_info){1, 0,
1815                                        MLX5_MODI_OUT_IP_ECN};
1816                if (mask)
1817                        mask[idx] = 0x3 >> (2 - width);
1818                break;
1819        case RTE_FLOW_FIELD_POINTER:
1820        case RTE_FLOW_FIELD_VALUE:
1821        default:
1822                MLX5_ASSERT(false);
1823                break;
1824        }
1825}
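
    /*
     * Illustrative note: a 48-bit field spans two hardware modify
     * fields. E.g. RTE_FLOW_FIELD_MAC_DST with offset 0 and width 48
     * yields two entries: {2, 4, MLX5_MODI_OUT_DMAC_15_0} with mask
     * 0xffff and {4, 0, MLX5_MODI_OUT_DMAC_47_16} with mask 0xffffffff.
     */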
1826
1827/**
1828 * Convert modify_field action to DV specification.
1829 *
1830 * @param[in] dev
1831 *   Pointer to the rte_eth_dev structure.
1832 * @param[in,out] resource
1833 *   Pointer to the modify-header resource.
1834 * @param[in] action
1835 *   Pointer to action specification.
1836 * @param[in] attr
1837 *   Attributes of flow that includes this item.
1838 * @param[out] error
1839 *   Pointer to the error structure.
1840 *
1841 * @return
1842 *   0 on success, a negative errno value otherwise and rte_errno is set.
1843 */
1844static int
1845flow_dv_convert_action_modify_field
1846                        (struct rte_eth_dev *dev,
1847                         struct mlx5_flow_dv_modify_hdr_resource *resource,
1848                         const struct rte_flow_action *action,
1849                         const struct rte_flow_attr *attr,
1850                         struct rte_flow_error *error)
1851{
1852        const struct rte_flow_action_modify_field *conf =
1853                (const struct rte_flow_action_modify_field *)(action->conf);
1854        struct rte_flow_item item = {
1855                .spec = NULL,
1856                .mask = NULL
1857        };
1858        struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1859                                                                {0, 0, 0} };
1860        struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1861                                                                {0, 0, 0} };
1862        uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1863        uint32_t type, meta = 0;
1864
1865        if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1866            conf->src.field == RTE_FLOW_FIELD_VALUE) {
1867                type = MLX5_MODIFICATION_TYPE_SET;
1868                /* For SET fill the destination field (field) first. */
1869                mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1870                                                  conf->width, dev,
1871                                                  attr, error);
1872                item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1873                                        (void *)(uintptr_t)conf->src.pvalue :
1874                                        (void *)(uintptr_t)&conf->src.value;
1875                if (conf->dst.field == RTE_FLOW_FIELD_META) {
1876                        meta = *(const unaligned_uint32_t *)item.spec;
1877                        meta = rte_cpu_to_be_32(meta);
1878                        item.spec = &meta;
1879                }
1880        } else {
1881                type = MLX5_MODIFICATION_TYPE_COPY;
1882                /* For COPY fill the destination field (dcopy) without mask. */
1883                mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1884                                                  conf->width, dev,
1885                                                  attr, error);
1886                /* Then construct the source field (field) with mask. */
1887                mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1888                                                  conf->width, dev,
1889                                                  attr, error);
1890        }
1891        item.mask = &mask;
1892        return flow_dv_convert_modify_action(&item,
1893                        field, dcopy, resource, type, error);
1894}
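
    /*
     * Illustrative sketch (not part of the driver): a COPY conversion
     * results from an action such as the following, which copies the
     * GTP TEID into tag register 0:
     *
     *         struct rte_flow_action_modify_field mf = {
     *                 .operation = RTE_FLOW_MODIFY_SET,
     *                 .dst = { .field = RTE_FLOW_FIELD_TAG, .level = 0 },
     *                 .src = { .field = RTE_FLOW_FIELD_GTP_TEID },
     *                 .width = 32,
     *         };
     *
     * Since the source is neither VALUE nor POINTER, the code above
     * takes the MLX5_MODIFICATION_TYPE_COPY branch.
     */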
1895
1896/**
1897 * Validate MARK item.
1898 *
1899 * @param[in] dev
1900 *   Pointer to the rte_eth_dev structure.
1901 * @param[in] item
1902 *   Item specification.
1903 * @param[in] attr
1904 *   Attributes of flow that includes this item.
1905 * @param[out] error
1906 *   Pointer to error structure.
1907 *
1908 * @return
1909 *   0 on success, a negative errno value otherwise and rte_errno is set.
1910 */
1911static int
1912flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1913                           const struct rte_flow_item *item,
1914                           const struct rte_flow_attr *attr __rte_unused,
1915                           struct rte_flow_error *error)
1916{
1917        struct mlx5_priv *priv = dev->data->dev_private;
1918        struct mlx5_sh_config *config = &priv->sh->config;
1919        const struct rte_flow_item_mark *spec = item->spec;
1920        const struct rte_flow_item_mark *mask = item->mask;
1921        const struct rte_flow_item_mark nic_mask = {
1922                .id = priv->sh->dv_mark_mask,
1923        };
1924        int ret;
1925
1926        if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1927                return rte_flow_error_set(error, ENOTSUP,
1928                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
1929                                          "extended metadata feature"
1930                                          " isn't enabled");
1931        if (!mlx5_flow_ext_mreg_supported(dev))
1932                return rte_flow_error_set(error, ENOTSUP,
1933                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
1934                                          "extended metadata register"
1935                                          " isn't supported");
1936        if (!nic_mask.id)
1937                return rte_flow_error_set(error, ENOTSUP,
1938                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
1939                                          "extended metadata register"
1940                                          " isn't available");
1941        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1942        if (ret < 0)
1943                return ret;
1944        if (!spec)
1945                return rte_flow_error_set(error, EINVAL,
1946                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1947                                          item->spec,
1948                                          "data cannot be empty");
1949        if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1950                return rte_flow_error_set(error, EINVAL,
1951                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1952                                          &spec->id,
1953                                          "mark id exceeds the limit");
1954        if (!mask)
1955                mask = &nic_mask;
1956        if (!mask->id)
1957                return rte_flow_error_set(error, EINVAL,
1958                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1959                                        "mask cannot be zero");
1960
1961        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1962                                        (const uint8_t *)&nic_mask,
1963                                        sizeof(struct rte_flow_item_mark),
1964                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1965        if (ret < 0)
1966                return ret;
1967        return 0;
1968}
1969
1970/**
1971 * Validate META item.
1972 *
1973 * @param[in] dev
1974 *   Pointer to the rte_eth_dev structure.
1975 * @param[in] item
1976 *   Item specification.
1977 * @param[in] attr
1978 *   Attributes of flow that includes this item.
1979 * @param[out] error
1980 *   Pointer to error structure.
1981 *
1982 * @return
1983 *   0 on success, a negative errno value otherwise and rte_errno is set.
1984 */
1985static int
1986flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1987                           const struct rte_flow_item *item,
1988                           const struct rte_flow_attr *attr,
1989                           struct rte_flow_error *error)
1990{
1991        struct mlx5_priv *priv = dev->data->dev_private;
1992        struct mlx5_sh_config *config = &priv->sh->config;
1993        const struct rte_flow_item_meta *spec = item->spec;
1994        const struct rte_flow_item_meta *mask = item->mask;
1995        struct rte_flow_item_meta nic_mask = {
1996                .data = UINT32_MAX
1997        };
1998        int reg;
1999        int ret;
2000
2001        if (!spec)
2002                return rte_flow_error_set(error, EINVAL,
2003                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2004                                          item->spec,
2005                                          "data cannot be empty");
2006        if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2007                if (!mlx5_flow_ext_mreg_supported(dev))
2008                        return rte_flow_error_set(error, ENOTSUP,
2009                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2010                                          "extended metadata register"
2011                                          " isn't supported");
2012                reg = flow_dv_get_metadata_reg(dev, attr, error);
2013                if (reg < 0)
2014                        return reg;
2015                if (reg == REG_NON)
2016                        return rte_flow_error_set(error, ENOTSUP,
2017                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
2018                                        "unavailable extended metadata register");
2019                if (reg == REG_B)
2020                        return rte_flow_error_set(error, ENOTSUP,
2021                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2022                                          "match on reg_b "
2023                                          "isn't supported");
2024                if (reg != REG_A)
2025                        nic_mask.data = priv->sh->dv_meta_mask;
2026        } else {
2027                if (attr->transfer)
2028                        return rte_flow_error_set(error, ENOTSUP,
2029                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
2030                                        "extended metadata feature "
2031                                        "should be enabled when "
2032                                        "meta item is requested "
2033                                        "with e-switch mode");
2034                if (attr->ingress)
2035                        return rte_flow_error_set(error, ENOTSUP,
2036                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
2037                                        "match on metadata for ingress "
2038                                        "is not supported in legacy "
2039                                        "metadata mode");
2040        }
2041        if (!mask)
2042                mask = &rte_flow_item_meta_mask;
2043        if (!mask->data)
2044                return rte_flow_error_set(error, EINVAL,
2045                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2046                                        "mask cannot be zero");
2047
2048        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2049                                        (const uint8_t *)&nic_mask,
2050                                        sizeof(struct rte_flow_item_meta),
2051                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2052        return ret;
2053}
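
    /*
     * Illustrative sketch (not part of the driver): with extended
     * metadata enabled, an application matches metadata previously
     * attached by a SET_META action, e.g.:
     *
     *         struct rte_flow_item_meta meta_spec = { .data = 0x1234 };
     *         struct rte_flow_item_meta meta_mask = { .data = UINT32_MAX };
     *
     * In legacy mode the checks above reject the item for ingress and
     * transfer flows.
     */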
2054
2055/**
2056 * Validate TAG item.
2057 *
2058 * @param[in] dev
2059 *   Pointer to the rte_eth_dev structure.
2060 * @param[in] item
2061 *   Item specification.
2062 * @param[in] attr
2063 *   Attributes of flow that includes this item.
2064 * @param[out] error
2065 *   Pointer to error structure.
2066 *
2067 * @return
2068 *   0 on success, a negative errno value otherwise and rte_errno is set.
2069 */
2070static int
2071flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2072                          const struct rte_flow_item *item,
2073                          const struct rte_flow_attr *attr __rte_unused,
2074                          struct rte_flow_error *error)
2075{
2076        const struct rte_flow_item_tag *spec = item->spec;
2077        const struct rte_flow_item_tag *mask = item->mask;
2078        const struct rte_flow_item_tag nic_mask = {
2079                .data = RTE_BE32(UINT32_MAX),
2080                .index = 0xff,
2081        };
2082        int ret;
2083
2084        if (!mlx5_flow_ext_mreg_supported(dev))
2085                return rte_flow_error_set(error, ENOTSUP,
2086                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2087                                          "extended metadata register"
2088                                          " isn't supported");
2089        if (!spec)
2090                return rte_flow_error_set(error, EINVAL,
2091                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2092                                          item->spec,
2093                                          "data cannot be empty");
2094        if (!mask)
2095                mask = &rte_flow_item_tag_mask;
2096        if (!mask->data)
2097                return rte_flow_error_set(error, EINVAL,
2098                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2099                                        "mask cannot be zero");
2100
2101        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2102                                        (const uint8_t *)&nic_mask,
2103                                        sizeof(struct rte_flow_item_tag),
2104                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2105        if (ret < 0)
2106                return ret;
2107        if (mask->index != 0xff)
2108                return rte_flow_error_set(error, EINVAL,
2109                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2110                                          "partial mask for tag index"
2111                                          " is not supported");
2112        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2113        if (ret < 0)
2114                return ret;
2115        MLX5_ASSERT(ret != REG_NON);
2116        return 0;
2117}
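
    /*
     * Illustrative sketch (not part of the driver): a TAG item passing
     * the checks above matches tag register index 0 with the full index
     * mask, e.g.:
     *
     *         struct rte_flow_item_tag tag_spec = { .data = 5, .index = 0 };
     *         struct rte_flow_item_tag tag_mask = {
     *                 .data = UINT32_MAX,
     *                 .index = 0xff,
     *         };
     */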
2118
2119/**
2120 * Validate PORT_ID (vport) item.
2121 *
2122 * @param[in] dev
2123 *   Pointer to the rte_eth_dev structure.
2124 * @param[in] item
2125 *   Item specification.
2126 * @param[in] attr
2127 *   Attributes of flow that includes this item.
2128 * @param[in] item_flags
2129 *   Bit-fields that hold the items detected until now.
2130 * @param[out] error
2131 *   Pointer to error structure.
2132 *
2133 * @return
2134 *   0 on success, a negative errno value otherwise and rte_errno is set.
2135 */
2136static int
2137flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2138                              const struct rte_flow_item *item,
2139                              const struct rte_flow_attr *attr,
2140                              uint64_t item_flags,
2141                              struct rte_flow_error *error)
2142{
2143        const struct rte_flow_item_port_id *spec = item->spec;
2144        const struct rte_flow_item_port_id *mask = item->mask;
2145        const struct rte_flow_item_port_id switch_mask = {
2146                        .id = 0xffffffff,
2147        };
2148        struct mlx5_priv *esw_priv;
2149        struct mlx5_priv *dev_priv;
2150        int ret;
2151
2152        if (!attr->transfer)
2153                return rte_flow_error_set(error, EINVAL,
2154                                          RTE_FLOW_ERROR_TYPE_ITEM,
2155                                          NULL,
2156                                          "match on port id is valid only"
2157                                          " when transfer flag is enabled");
2158        if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2159                return rte_flow_error_set(error, ENOTSUP,
2160                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2161                                          "multiple source ports are not"
2162                                          " supported");
2163        if (!mask)
2164                mask = &switch_mask;
2165        if (mask->id != 0xffffffff)
2166                return rte_flow_error_set(error, ENOTSUP,
2167                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2168                                           mask,
2169                                           "no support for partial mask on"
2170                                           " \"id\" field");
2171        ret = mlx5_flow_item_acceptable
2172                                (item, (const uint8_t *)mask,
2173                                 (const uint8_t *)&rte_flow_item_port_id_mask,
2174                                 sizeof(struct rte_flow_item_port_id),
2175                                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2176        if (ret)
2177                return ret;
2178        if (!spec)
2179                return 0;
2180        if (spec->id == MLX5_PORT_ESW_MGR)
2181                return 0;
2182        esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2183        if (!esw_priv)
2184                return rte_flow_error_set(error, rte_errno,
2185                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2186                                          "failed to obtain E-Switch info for"
2187                                          " port");
2188        dev_priv = mlx5_dev_to_eswitch_info(dev);
2189        if (!dev_priv)
2190                return rte_flow_error_set(error, rte_errno,
2191                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2192                                          NULL,
2193                                          "failed to obtain E-Switch info");
2194        if (esw_priv->domain_id != dev_priv->domain_id)
2195                return rte_flow_error_set(error, EINVAL,
2196                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2197                                          "cannot match on a port from a"
2198                                          " different E-Switch");
2199        return 0;
2200}
2201
2202/**
2203 * Validate represented port item.
2204 *
2205 * @param[in] dev
2206 *   Pointer to the rte_eth_dev structure.
2207 * @param[in] item
2208 *   Item specification.
2209 * @param[in] attr
2210 *   Attributes of flow that includes this item.
2211 * @param[in] item_flags
2212 *   Bit-fields that hold the items detected until now.
2213 * @param[out] error
2214 *   Pointer to error structure.
2215 *
2216 * @return
2217 *   0 on success, a negative errno value otherwise and rte_errno is set.
2218 */
2219static int
2220flow_dv_validate_item_represented_port(struct rte_eth_dev *dev,
2221                                       const struct rte_flow_item *item,
2222                                       const struct rte_flow_attr *attr,
2223                                       uint64_t item_flags,
2224                                       struct rte_flow_error *error)
2225{
2226        const struct rte_flow_item_ethdev *spec = item->spec;
2227        const struct rte_flow_item_ethdev *mask = item->mask;
2228        const struct rte_flow_item_ethdev switch_mask = {
2229                        .port_id = UINT16_MAX,
2230        };
2231        struct mlx5_priv *esw_priv;
2232        struct mlx5_priv *dev_priv;
2233        int ret;
2234
2235        if (!attr->transfer)
2236                return rte_flow_error_set(error, EINVAL,
2237                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2238                                          "match on port id is valid only when transfer flag is enabled");
2239        if (item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT)
2240                return rte_flow_error_set(error, ENOTSUP,
2241                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2242                                          "multiple source ports are not supported");
2243        if (!mask)
2244                mask = &switch_mask;
2245        if (mask->port_id != UINT16_MAX)
2246                return rte_flow_error_set(error, ENOTSUP,
2247                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2248                                           "no support for partial mask on \"port_id\" field");
2249        ret = mlx5_flow_item_acceptable
2250                                (item, (const uint8_t *)mask,
2251                                 (const uint8_t *)&rte_flow_item_ethdev_mask,
2252                                 sizeof(struct rte_flow_item_ethdev),
2253                                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2254        if (ret)
2255                return ret;
2256        if (!spec || spec->port_id == UINT16_MAX)
2257                return 0;
2258        esw_priv = mlx5_port_to_eswitch_info(spec->port_id, false);
2259        if (!esw_priv)
2260                return rte_flow_error_set(error, rte_errno,
2261                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2262                                          "failed to obtain E-Switch info for port");
2263        dev_priv = mlx5_dev_to_eswitch_info(dev);
2264        if (!dev_priv)
2265                return rte_flow_error_set(error, rte_errno,
2266                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2267                                          NULL,
2268                                          "failed to obtain E-Switch info");
2269        if (esw_priv->domain_id != dev_priv->domain_id)
2270                return rte_flow_error_set(error, EINVAL,
2271                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2272                                          "cannot match on a port from a different E-Switch");
2273        return 0;
2274}
2275
2276/**
2277 * Validate VLAN item.
2278 *
2279 * @param[in] item
2280 *   Item specification.
2281 * @param[in] item_flags
2282 *   Bit-fields that hold the items detected until now.
2283 * @param[in] dev
2284 *   Ethernet device flow is being created on.
2285 * @param[out] error
2286 *   Pointer to error structure.
2287 *
2288 * @return
2289 *   0 on success, a negative errno value otherwise and rte_errno is set.
2290 */
2291static int
2292flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2293                           uint64_t item_flags,
2294                           struct rte_eth_dev *dev,
2295                           struct rte_flow_error *error)
2296{
2297        const struct rte_flow_item_vlan *mask = item->mask;
2298        const struct rte_flow_item_vlan nic_mask = {
2299                .tci = RTE_BE16(UINT16_MAX),
2300                .inner_type = RTE_BE16(UINT16_MAX),
2301                .has_more_vlan = 1,
2302        };
2303        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2304        int ret;
2305        const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2306                                        MLX5_FLOW_LAYER_INNER_L4) :
2307                                       (MLX5_FLOW_LAYER_OUTER_L3 |
2308                                        MLX5_FLOW_LAYER_OUTER_L4);
2309        const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2310                                        MLX5_FLOW_LAYER_OUTER_VLAN;
2311
2312        if (item_flags & vlanm)
2313                return rte_flow_error_set(error, EINVAL,
2314                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2315                                          "multiple VLAN layers not supported");
2316        else if ((item_flags & l34m) != 0)
2317                return rte_flow_error_set(error, EINVAL,
2318                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2319                                          "VLAN cannot follow L3/L4 layer");
2320        if (!mask)
2321                mask = &rte_flow_item_vlan_mask;
2322        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2323                                        (const uint8_t *)&nic_mask,
2324                                        sizeof(struct rte_flow_item_vlan),
2325                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2326        if (ret)
2327                return ret;
2328        if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2329                struct mlx5_priv *priv = dev->data->dev_private;
2330
2331                if (priv->vmwa_context) {
2332                        /*
2333                         * A non-NULL context means we run in a virtual machine
2334                         * with SR-IOV enabled and must create a VLAN interface
2335                         * so the hypervisor sets up the E-Switch vport context
2336                         * correctly. We avoid creating multiple VLAN interfaces,
2337                         * so we cannot support a partial VLAN tag mask.
2338                         */
2339                        return rte_flow_error_set(error, EINVAL,
2340                                                  RTE_FLOW_ERROR_TYPE_ITEM,
2341                                                  item,
2342                                                  "VLAN tag mask is not"
2343                                                  " supported in virtual"
2344                                                  " environment");
2345                }
2346        }
2347        return 0;
2348}
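
    /*
     * Illustrative note: with a non-NULL vmwa_context (VM with SR-IOV)
     * only the full 12-bit VID mask is accepted for an outer VLAN, e.g.:
     *
     *         struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
     *         struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) };
     */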
2349
2350/*
2351 * GTP flags are contained in 1 byte of the format:
2352 * -------------------------------------------
2353 * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2354 * |-----------------------------------------|
2355 * | value | Version | PT | Res | E | S | PN |
2356 * -------------------------------------------
2357 *
2358 * Matching is supported only for GTP flags E, S, PN.
2359 */
2360#define MLX5_GTP_FLAGS_MASK     0x07
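
    /*
     * E.g. a GTPv1 header with the E flag set carries v_pt_rsv_flags ==
     * 0x34 (version 1, PT 1, E 1); only the low three bits (E, S, PN,
     * i.e. MLX5_GTP_FLAGS_MASK) can be matched.
     */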
2361
2362/**
2363 * Validate GTP item.
2364 *
2365 * @param[in] dev
2366 *   Pointer to the rte_eth_dev structure.
2367 * @param[in] item
2368 *   Item specification.
2369 * @param[in] item_flags
2370 *   Bit-fields that hold the items detected until now.
2371 * @param[out] error
2372 *   Pointer to error structure.
2373 *
2374 * @return
2375 *   0 on success, a negative errno value otherwise and rte_errno is set.
2376 */
2377static int
2378flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2379                          const struct rte_flow_item *item,
2380                          uint64_t item_flags,
2381                          struct rte_flow_error *error)
2382{
2383        struct mlx5_priv *priv = dev->data->dev_private;
2384        const struct rte_flow_item_gtp *spec = item->spec;
2385        const struct rte_flow_item_gtp *mask = item->mask;
2386        const struct rte_flow_item_gtp nic_mask = {
2387                .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2388                .msg_type = 0xff,
2389                .teid = RTE_BE32(0xffffffff),
2390        };
2391
2392        if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_gtp)
2393                return rte_flow_error_set(error, ENOTSUP,
2394                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2395                                          "GTP support is not enabled");
2396        if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2397                return rte_flow_error_set(error, ENOTSUP,
2398                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2399                                          "multiple tunnel layers not"
2400                                          " supported");
2401        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2402                return rte_flow_error_set(error, EINVAL,
2403                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2404                                          "no outer UDP layer found");
2405        if (!mask)
2406                mask = &rte_flow_item_gtp_mask;
2407        if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2408                return rte_flow_error_set(error, ENOTSUP,
2409                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2410                                          "Match is supported for GTP"
2411                                          " flags only");
2412        return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2413                                         (const uint8_t *)&nic_mask,
2414                                         sizeof(struct rte_flow_item_gtp),
2415                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2416}
2417
2418/**
2419 * Validate GTP PSC item.
2420 *
2421 * @param[in] item
2422 *   Item specification.
2423 * @param[in] last_item
2424 *   Previous validated item in the pattern items.
2425 * @param[in] gtp_item
2426 *   Previous GTP item specification.
2427 * @param[in] attr
2428 *   Pointer to flow attributes.
2429 * @param[out] error
2430 *   Pointer to error structure.
2431 *
2432 * @return
2433 *   0 on success, a negative errno value otherwise and rte_errno is set.
2434 */
2435static int
2436flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2437                              uint64_t last_item,
2438                              const struct rte_flow_item *gtp_item,
2439                              const struct rte_flow_attr *attr,
2440                              struct rte_flow_error *error)
2441{
2442        const struct rte_flow_item_gtp *gtp_spec;
2443        const struct rte_flow_item_gtp *gtp_mask;
2444        const struct rte_flow_item_gtp_psc *mask;
2445        const struct rte_flow_item_gtp_psc nic_mask = {
2446                .hdr.type = 0xF,
2447                .hdr.qfi = 0x3F,
2448        };
2449
2450        if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2451                return rte_flow_error_set
2452                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2453                         "GTP PSC item must be preceded by a GTP item");
2454        gtp_spec = gtp_item->spec;
2455        gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2456        /* Reject a GTP item whose mask requests the E flag to match zero. */
2457        if (gtp_spec &&
2458                (gtp_mask->v_pt_rsv_flags &
2459                ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2460                return rte_flow_error_set
2461                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2462                         "GTP E flag must be 1 to match GTP PSC");
2463        /* Check the flow is not created in group zero. */
2464        if (!attr->transfer && !attr->group)
2465                return rte_flow_error_set
2466                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2467                         "GTP PSC is not supported for group 0");
2468        /* Nothing more to validate when no GTP PSC spec is provided. */
2469        if (!item->spec)
2470                return 0;
2471        mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2472        return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2473                                         (const uint8_t *)&nic_mask,
2474                                         sizeof(struct rte_flow_item_gtp_psc),
2475                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2476}
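
    /*
     * Illustrative sketch (not part of the driver): a pattern accepted
     * by the checks above pairs a GTP item with the E flag set and the
     * PSC item, in a non-zero group, e.g.:
     *
     *         struct rte_flow_item_gtp gtp = { .v_pt_rsv_flags = 0x04 };
     *         struct rte_flow_item_gtp_psc psc = { .hdr.qfi = 9 };
     */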
2477
2478/**
2479 * Validate IPV4 item.
2480 * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2481 * add specific validation of fragment_offset field,
2482 * add specific validation of the fragment_offset field.
2483 * @param[in] dev
2484 *   Pointer to the rte_eth_dev structure.
2485 * @param[in] item
2486 *   Item specification.
2487 * @param[in] item_flags
2488 *   Bit-fields that hold the items detected until now.
     * @param[in] last_item
     *   Previous validated item in the pattern items.
     * @param[in] ether_type
     *   Type in the ethernet layer header (including dot1q).
     * @param[out] error
     *   Pointer to error structure.
2489 *
2490 * @return
2491 *   0 on success, a negative errno value otherwise and rte_errno is set.
2492 */
2493static int
2494flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
2495                           const struct rte_flow_item *item,
2496                           uint64_t item_flags, uint64_t last_item,
2497                           uint16_t ether_type, struct rte_flow_error *error)
2498{
2499        int ret;
2500        struct mlx5_priv *priv = dev->data->dev_private;
2501        struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
2502        const struct rte_flow_item_ipv4 *spec = item->spec;
2503        const struct rte_flow_item_ipv4 *last = item->last;
2504        const struct rte_flow_item_ipv4 *mask = item->mask;
2505        rte_be16_t fragment_offset_spec = 0;
2506        rte_be16_t fragment_offset_last = 0;
2507        struct rte_flow_item_ipv4 nic_ipv4_mask = {
2508                .hdr = {
2509                        .src_addr = RTE_BE32(0xffffffff),
2510                        .dst_addr = RTE_BE32(0xffffffff),
2511                        .type_of_service = 0xff,
2512                        .fragment_offset = RTE_BE16(0xffff),
2513                        .next_proto_id = 0xff,
2514                        .time_to_live = 0xff,
2515                },
2516        };
2517
2518        if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
2519                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2520                bool ihl_cap = !tunnel ?
2521                               attr->outer_ipv4_ihl : attr->inner_ipv4_ihl;
2522                if (!ihl_cap)
2523                        return rte_flow_error_set(error, ENOTSUP,
2524                                                  RTE_FLOW_ERROR_TYPE_ITEM,
2525                                                  item,
2526                                                  "IPV4 ihl offload not supported");
2527                nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
2528        }
2529        ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2530                                           ether_type, &nic_ipv4_mask,
2531                                           MLX5_ITEM_RANGE_ACCEPTED, error);
2532        if (ret < 0)
2533                return ret;
2534        if (spec && mask)
2535                fragment_offset_spec = spec->hdr.fragment_offset &
2536                                       mask->hdr.fragment_offset;
2537        if (!fragment_offset_spec)
2538                return 0;
2539        /*
2540         * spec and mask are valid, enforce using full mask to make sure the
2541         * complete value is used correctly.
2542         */
2543        if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2544                        != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2545                return rte_flow_error_set(error, EINVAL,
2546                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2547                                          item, "must use full mask for"
2548                                          " fragment_offset");
        /*
         * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
         * i.e. this is the first fragment of a fragmented packet.
         * This is not yet supported in MLX5; return an appropriate error.
         */
2554        if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2555                return rte_flow_error_set(error, ENOTSUP,
2556                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2557                                          "match on first fragment not "
2558                                          "supported");
2559        if (fragment_offset_spec && !last)
2560                return rte_flow_error_set(error, ENOTSUP,
2561                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2562                                          "specified value not supported");
2563        /* spec and last are valid, validate the specified range. */
2564        fragment_offset_last = last->hdr.fragment_offset &
2565                               mask->hdr.fragment_offset;
        /*
         * Match on fragment_offset spec 0x2001 and last 0x3fff
         * means MF is 1 and frag-offset is > 0, i.e. the second
         * fragment and onward, excluding the last one.
         * This is not yet supported in MLX5; return an appropriate
         * error.
         */
2573        if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2574            fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2575                return rte_flow_error_set(error, ENOTSUP,
2576                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2577                                          last, "match on following "
2578                                          "fragments not supported");
        /*
         * Match on fragment_offset spec 0x0001 and last 0x1fff
         * means MF is 0 and frag-offset is > 0, i.e. the last
         * fragment of a fragmented packet.
         * This is not yet supported in MLX5; return an appropriate
         * error.
         */
2586        if (fragment_offset_spec == RTE_BE16(1) &&
2587            fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2588                return rte_flow_error_set(error, ENOTSUP,
2589                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2590                                          last, "match on last "
2591                                          "fragment not supported");
        /*
         * Match on fragment_offset spec 0x0001 and last 0x3fff
         * means MF and/or frag-offset is not 0, i.e. a fragmented
         * packet. This is the only accepted range; other range
         * values are invalid and rejected.
         */
2598        if (!(fragment_offset_spec == RTE_BE16(1) &&
2599              fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2600                return rte_flow_error_set(error, ENOTSUP,
2601                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2602                                          "specified range not supported");
2603        return 0;
2604}
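
/*
 * Editorial sketch, not used by the driver: the only fragment_offset range
 * accepted above -- spec 0x0001 with last 0x3fff under a full mask, i.e.
 * "any fragment". The helper name is hypothetical.
 */
static __rte_unused void
mlx5_doc_example_ipv4_any_frag(struct rte_flow_item *item,
                               struct rte_flow_item_ipv4 *spec,
                               struct rte_flow_item_ipv4 *last,
                               struct rte_flow_item_ipv4 *mask)
{
        memset(spec, 0, sizeof(*spec));
        memset(last, 0, sizeof(*last));
        memset(mask, 0, sizeof(*mask));
        /* MF and/or fragment offset not 0: any fragmented packet. */
        spec->hdr.fragment_offset = RTE_BE16(1);
        last->hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK);
        /* A full mask is mandatory once fragment_offset is matched. */
        mask->hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK);
        item->type = RTE_FLOW_ITEM_TYPE_IPV4;
        item->spec = spec;
        item->last = last;
        item->mask = mask;
}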
2605
2606/**
2607 * Validate IPV6 fragment extension item.
2608 *
2609 * @param[in] item
2610 *   Item specification.
2611 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
2613 * @param[out] error
2614 *   Pointer to error structure.
2615 *
2616 * @return
2617 *   0 on success, a negative errno value otherwise and rte_errno is set.
2618 */
2619static int
2620flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2621                                    uint64_t item_flags,
2622                                    struct rte_flow_error *error)
2623{
2624        const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2625        const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2626        const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2627        rte_be16_t frag_data_spec = 0;
2628        rte_be16_t frag_data_last = 0;
2629        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2630        const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2631                                      MLX5_FLOW_LAYER_OUTER_L4;
2632        int ret = 0;
2633        struct rte_flow_item_ipv6_frag_ext nic_mask = {
2634                .hdr = {
2635                        .next_header = 0xff,
2636                        .frag_data = RTE_BE16(0xffff),
2637                },
2638        };
2639
2640        if (item_flags & l4m)
2641                return rte_flow_error_set(error, EINVAL,
2642                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2643                                          "ipv6 fragment extension item cannot "
2644                                          "follow L4 item.");
2645        if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2646            (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2647                return rte_flow_error_set(error, EINVAL,
2648                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2649                                          "ipv6 fragment extension item must "
2650                                          "follow ipv6 item");
2651        if (spec && mask)
2652                frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2653        if (!frag_data_spec)
2654                return 0;
2655        /*
2656         * spec and mask are valid, enforce using full mask to make sure the
2657         * complete value is used correctly.
2658         */
2659        if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2660                                RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2661                return rte_flow_error_set(error, EINVAL,
2662                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2663                                          item, "must use full mask for"
2664                                          " frag_data");
        /*
         * Match on frag_data 0x0001 means M is 1 and frag-offset is 0,
         * i.e. the first fragment of a fragmented packet.
         */
2669        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2670                return rte_flow_error_set(error, ENOTSUP,
2671                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2672                                          "match on first fragment not "
2673                                          "supported");
2674        if (frag_data_spec && !last)
2675                return rte_flow_error_set(error, EINVAL,
2676                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
2677                                          "specified value not supported");
2678        ret = mlx5_flow_item_acceptable
2679                                (item, (const uint8_t *)mask,
2680                                 (const uint8_t *)&nic_mask,
2681                                 sizeof(struct rte_flow_item_ipv6_frag_ext),
2682                                 MLX5_ITEM_RANGE_ACCEPTED, error);
2683        if (ret)
2684                return ret;
2685        /* spec and last are valid, validate the specified range. */
2686        frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
        /*
         * Match on frag_data spec 0x0009 and last 0xfff9
         * means M is 1 and frag-offset is > 0, i.e. the second
         * fragment and onward, excluding the last one.
         * This is not yet supported in MLX5; return an appropriate
         * error.
         */
2694        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2695                                       RTE_IPV6_EHDR_MF_MASK) &&
2696            frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2697                return rte_flow_error_set(error, ENOTSUP,
2698                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2699                                          last, "match on following "
2700                                          "fragments not supported");
        /*
         * Match on frag_data spec 0x0008 and last 0xfff8
         * means M is 0 and frag-offset is > 0, i.e. the last
         * fragment of a fragmented packet.
         * This is not yet supported in MLX5; return an appropriate
         * error.
         */
2708        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2709            frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2710                return rte_flow_error_set(error, ENOTSUP,
2711                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2712                                          last, "match on last "
2713                                          "fragment not supported");
2714        /* Other range values are invalid and rejected. */
2715        return rte_flow_error_set(error, EINVAL,
2716                                  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2717                                  "specified range not supported");
2718}
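
/*
 * Editorial sketch, not used by the driver: given the checks above, the
 * ipv6_frag_ext matches that pass validation are those that do not match on
 * frag_data at all, e.g. matching only on the presence of the extension
 * header. The helper name is hypothetical.
 */
static __rte_unused void
mlx5_doc_example_ipv6_frag_present(struct rte_flow_item *item)
{
        /* Empty spec/mask: match any packet carrying the extension. */
        item->type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
        item->spec = NULL;
        item->last = NULL;
        item->mask = NULL;
}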
2719
/**
2721 * Validate ASO CT item.
2722 *
2723 * @param[in] dev
2724 *   Pointer to the rte_eth_dev structure.
2725 * @param[in] item
2726 *   Item specification.
2727 * @param[in] item_flags
 *   Pointer to bit-fields that hold the items detected until now.
2729 * @param[out] error
2730 *   Pointer to error structure.
2731 *
2732 * @return
2733 *   0 on success, a negative errno value otherwise and rte_errno is set.
2734 */
2735static int
2736flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2737                             const struct rte_flow_item *item,
2738                             uint64_t *item_flags,
2739                             struct rte_flow_error *error)
2740{
        const struct rte_flow_item_conntrack *spec = item->spec;
        const struct rte_flow_item_conntrack *mask = item->mask;
        uint32_t flags;

        RTE_SET_USED(dev);
        if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "Only one CT is supported");
        if (!mask)
                mask = &rte_flow_item_conntrack_mask;
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "CT item spec cannot be NULL");
        flags = spec->flags & mask->flags;
2753        if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2754            ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2755             (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2756             (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2757                return rte_flow_error_set(error, EINVAL,
2758                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2759                                          "Conflict status bits");
2760        /* State change also needs to be considered. */
2761        *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2762        return 0;
2763}
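
/*
 * Editorial sketch, not used by the driver: a conntrack item that passes
 * the check above -- VALID is not combined with INVALID, BAD or DISABLED.
 * The helper name is hypothetical.
 */
static __rte_unused void
mlx5_doc_example_conntrack_item(struct rte_flow_item *item,
                                struct rte_flow_item_conntrack *spec)
{
        spec->flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID;
        item->type = RTE_FLOW_ITEM_TYPE_CONNTRACK;
        item->spec = spec;
        /* A NULL mask falls back to rte_flow_item_conntrack_mask. */
        item->mask = NULL;
        item->last = NULL;
}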
2764
2765/**
2766 * Validate the pop VLAN action.
2767 *
2768 * @param[in] dev
2769 *   Pointer to the rte_eth_dev structure.
2770 * @param[in] action_flags
2771 *   Holds the actions detected until now.
2772 * @param[in] action
2773 *   Pointer to the pop vlan action.
2774 * @param[in] item_flags
2775 *   The items found in this flow rule.
2776 * @param[in] attr
2777 *   Pointer to flow attributes.
2778 * @param[out] error
2779 *   Pointer to error structure.
2780 *
2781 * @return
2782 *   0 on success, a negative errno value otherwise and rte_errno is set.
2783 */
2784static int
2785flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2786                                 uint64_t action_flags,
2787                                 const struct rte_flow_action *action,
2788                                 uint64_t item_flags,
2789                                 const struct rte_flow_attr *attr,
2790                                 struct rte_flow_error *error)
2791{
2792        const struct mlx5_priv *priv = dev->data->dev_private;
2793        struct mlx5_dev_ctx_shared *sh = priv->sh;
2794        bool direction_error = false;
2795
2796        if (!priv->sh->pop_vlan_action)
2797                return rte_flow_error_set(error, ENOTSUP,
2798                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2799                                          NULL,
2800                                          "pop vlan action is not supported");
        /*
         * Pop VLAN on egress is not supported, except in FDB mode on
         * ConnectX-6 and newer.
         */
2802        if (attr->transfer) {
2803                bool fdb_tx = priv->representor_id != UINT16_MAX;
2804                bool is_cx5 = sh->steering_format_version ==
2805                    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2806
2807                if (fdb_tx && is_cx5)
2808                        direction_error = true;
2809        } else if (attr->egress) {
2810                direction_error = true;
2811        }
2812        if (direction_error)
2813                return rte_flow_error_set(error, ENOTSUP,
2814                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2815                                          NULL,
2816                                          "pop vlan action not supported for egress");
2817        if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2818                return rte_flow_error_set(error, ENOTSUP,
2819                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
2820                                          "no support for multiple VLAN "
2821                                          "actions");
2822        /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2823        if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2824            !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2825                return rte_flow_error_set(error, ENOTSUP,
2826                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2827                                          NULL,
2828                                          "cannot pop vlan after decap without "
2829                                          "match on inner vlan in the flow");
2830        /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2831        if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2832            !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2833                return rte_flow_error_set(error, ENOTSUP,
2834                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2835                                          NULL,
2836                                          "cannot pop vlan without a "
2837                                          "match on (outer) vlan in the flow");
2838        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2839                return rte_flow_error_set(error, EINVAL,
2840                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
2841                                          "wrong action order, port_id should "
2842                                          "be after pop VLAN action");
2843        if (!attr->transfer && priv->representor)
2844                return rte_flow_error_set(error, ENOTSUP,
2845                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2846                                          "pop vlan action for VF representor "
2847                                          "not supported on NIC table");
2848        return 0;
2849}
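
/*
 * Editorial sketch, not used by the driver: an ingress rule that passes the
 * pop VLAN checks above -- no decap, so the pattern must match the (outer)
 * VLAN being popped. The helper name is hypothetical.
 */
static __rte_unused int
mlx5_doc_example_pop_vlan(uint16_t port_id, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                /* Required: match on the VLAN that will be popped. */
                { .type = RTE_FLOW_ITEM_TYPE_VLAN },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}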
2850
2851/**
2852 * Get VLAN default info from vlan match info.
2853 *
 * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to the VLAN info to fill.
2861 */
2862static void
2863flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2864                                  struct rte_vlan_hdr *vlan)
2865{
2866        const struct rte_flow_item_vlan nic_mask = {
2867                .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2868                                MLX5DV_FLOW_VLAN_VID_MASK),
2869                .inner_type = RTE_BE16(0xffff),
2870        };
2871
2872        if (items == NULL)
2873                return;
2874        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2875                int type = items->type;
2876
2877                if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2878                    type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2879                        break;
2880        }
2881        if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2882                const struct rte_flow_item_vlan *vlan_m = items->mask;
2883                const struct rte_flow_item_vlan *vlan_v = items->spec;
2884
2885                /* If VLAN item in pattern doesn't contain data, return here. */
2886                if (!vlan_v)
2887                        return;
2888                if (!vlan_m)
2889                        vlan_m = &nic_mask;
                /* Only full match values are accepted. */
2891                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2892                     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2893                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2894                        vlan->vlan_tci |=
2895                                rte_be_to_cpu_16(vlan_v->tci &
2896                                                 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2897                }
2898                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2899                     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2900                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2901                        vlan->vlan_tci |=
2902                                rte_be_to_cpu_16(vlan_v->tci &
2903                                                 MLX5DV_FLOW_VLAN_VID_MASK_BE);
2904                }
2905                if (vlan_m->inner_type == nic_mask.inner_type)
2906                        vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2907                                                           vlan_m->inner_type);
2908        }
2909}
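
/*
 * Editorial sketch, not used by the driver: feeding a full-mask VLAN item
 * through the helper above fills PCP and VID; a partial TCI mask would be
 * skipped. The helper name and the literal values are hypothetical.
 */
static __rte_unused void
mlx5_doc_example_vlan_info(struct rte_vlan_hdr *vlan)
{
        const struct rte_flow_item_vlan vlan_spec = {
                /* PCP 3, VID 100. */
                .tci = RTE_BE16((3 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100),
        };
        const struct rte_flow_item_vlan vlan_mask = {
                .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
                                MLX5DV_FLOW_VLAN_VID_MASK),
        };
        const struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_VLAN,
                  .spec = &vlan_spec, .mask = &vlan_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        memset(vlan, 0, sizeof(*vlan));
        /* inner_type mask is zero here, so eth_proto is left as is. */
        flow_dev_get_vlan_info_from_items(items, vlan);
}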
2910
2911/**
2912 * Validate the push VLAN action.
2913 *
2914 * @param[in] dev
2915 *   Pointer to the rte_eth_dev structure.
2916 * @param[in] action_flags
2917 *   Holds the actions detected until now.
 * @param[in] vlan_m
 *   Pointer to the VLAN item mask from the flow pattern, or NULL.
2920 * @param[in] action
2921 *   Pointer to the action structure.
2922 * @param[in] attr
2923 *   Pointer to flow attributes
2924 * @param[out] error
2925 *   Pointer to error structure.
2926 *
2927 * @return
2928 *   0 on success, a negative errno value otherwise and rte_errno is set.
2929 */
2930static int
2931flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2932                                  uint64_t action_flags,
2933                                  const struct rte_flow_item_vlan *vlan_m,
2934                                  const struct rte_flow_action *action,
2935                                  const struct rte_flow_attr *attr,
2936                                  struct rte_flow_error *error)
2937{
2938        const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2939        const struct mlx5_priv *priv = dev->data->dev_private;
2940
2941        if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2942            push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2943                return rte_flow_error_set(error, EINVAL,
2944                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
2945                                          "invalid vlan ethertype");
2946        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2947                return rte_flow_error_set(error, EINVAL,
2948                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
2949                                          "wrong action order, port_id should "
2950                                          "be after push VLAN");
2951        if (!attr->transfer && priv->representor)
2952                return rte_flow_error_set(error, ENOTSUP,
2953                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2954                                          "push vlan action for VF representor "
2955                                          "not supported on NIC table");
2956        if (vlan_m &&
2957            (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2958            (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2959                MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2960            !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2961            !(mlx5_flow_find_action
2962                (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2963                return rte_flow_error_set(error, EINVAL,
2964                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
2965                                          "not full match mask on VLAN PCP and "
2966                                          "there is no of_set_vlan_pcp action, "
2967                                          "push VLAN action cannot figure out "
2968                                          "PCP value");
2969        if (vlan_m &&
2970            (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2971            (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2972                MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2973            !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2974            !(mlx5_flow_find_action
2975                (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2976                return rte_flow_error_set(error, EINVAL,
2977                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
2978                                          "not full match mask on VLAN VID and "
2979                                          "there is no of_set_vlan_vid action, "
2980                                          "push VLAN action cannot figure out "
2981                                          "VID value");
2983        return 0;
2984}
2985
2986/**
2987 * Validate the set VLAN PCP.
2988 *
2989 * @param[in] action_flags
2990 *   Holds the actions detected until now.
2991 * @param[in] actions
2992 *   Pointer to the list of actions remaining in the flow rule.
2993 * @param[out] error
2994 *   Pointer to error structure.
2995 *
2996 * @return
2997 *   0 on success, a negative errno value otherwise and rte_errno is set.
2998 */
2999static int
3000flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
3001                                     const struct rte_flow_action actions[],
3002                                     struct rte_flow_error *error)
3003{
3004        const struct rte_flow_action *action = actions;
3005        const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
3006
3007        if (conf->vlan_pcp > 7)
3008                return rte_flow_error_set(error, EINVAL,
3009                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3010                                          "VLAN PCP value is too big");
3011        if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
3012                return rte_flow_error_set(error, ENOTSUP,
3013                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3014                                          "set VLAN PCP action must follow "
3015                                          "the push VLAN action");
3016        if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
3017                return rte_flow_error_set(error, ENOTSUP,
3018                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "Multiple VLAN PCP modifications are "
3020                                          "not supported");
3021        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3022                return rte_flow_error_set(error, EINVAL,
3023                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3024                                          "wrong action order, port_id should "
3025                                          "be after set VLAN PCP");
3026        return 0;
3027}
3028
3029/**
3030 * Validate the set VLAN VID.
3031 *
3032 * @param[in] item_flags
3033 *   Holds the items detected in this rule.
3034 * @param[in] action_flags
3035 *   Holds the actions detected until now.
3036 * @param[in] actions
3037 *   Pointer to the list of actions remaining in the flow rule.
3038 * @param[out] error
3039 *   Pointer to error structure.
3040 *
3041 * @return
3042 *   0 on success, a negative errno value otherwise and rte_errno is set.
3043 */
3044static int
3045flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
3046                                     uint64_t action_flags,
3047                                     const struct rte_flow_action actions[],
3048                                     struct rte_flow_error *error)
3049{
3050        const struct rte_flow_action *action = actions;
3051        const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
3052
3053        if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3054                return rte_flow_error_set(error, EINVAL,
3055                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3056                                          "VLAN VID value is too big");
3057        if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3058            !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3059                return rte_flow_error_set(error, ENOTSUP,
3060                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3061                                          "set VLAN VID action must follow push"
3062                                          " VLAN action or match on VLAN item");
3063        if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3064                return rte_flow_error_set(error, ENOTSUP,
3065                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3066                                          "Multiple VLAN VID modifications are "
3067                                          "not supported");
3068        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3069                return rte_flow_error_set(error, EINVAL,
3070                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3071                                          "wrong action order, port_id should "
3072                                          "be after set VLAN VID");
3073        return 0;
3074}
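
/*
 * Editorial sketch, not used by the driver: a transfer rule satisfying the
 * three validators above -- push VLAN with explicit set VLAN VID/PCP, and
 * port_id placed after all VLAN actions. The helper name and the dst_port
 * parameter are hypothetical.
 */
static __rte_unused int
mlx5_doc_example_push_vlan(uint16_t port_id, uint32_t dst_port,
                           struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .group = 1, .transfer = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_of_push_vlan push = {
                .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
        };
        /* Explicit VID/PCP, so no full-mask VLAN match is needed. */
        const struct rte_flow_action_of_set_vlan_vid vid = {
                .vlan_vid = RTE_BE16(100),
        };
        const struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
        const struct rte_flow_action_port_id dst = { .id = dst_port };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
                /* port_id must come after the VLAN actions. */
                { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &dst },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}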
3075
/**
3077 * Validate the FLAG action.
3078 *
3079 * @param[in] dev
3080 *   Pointer to the rte_eth_dev structure.
3081 * @param[in] action_flags
3082 *   Holds the actions detected until now.
3083 * @param[in] attr
3084 *   Pointer to flow attributes
3085 * @param[out] error
3086 *   Pointer to error structure.
3087 *
3088 * @return
3089 *   0 on success, a negative errno value otherwise and rte_errno is set.
3090 */
3091static int
3092flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3093                             uint64_t action_flags,
3094                             const struct rte_flow_attr *attr,
3095                             struct rte_flow_error *error)
3096{
3097        struct mlx5_priv *priv = dev->data->dev_private;
3098        struct mlx5_sh_config *config = &priv->sh->config;
3099        int ret;
3100
3101        /* Fall back if no extended metadata register support. */
3102        if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3103                return mlx5_flow_validate_action_flag(action_flags, attr,
3104                                                      error);
3105        /* Extensive metadata mode requires registers. */
3106        if (!mlx5_flow_ext_mreg_supported(dev))
3107                return rte_flow_error_set(error, ENOTSUP,
3108                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3109                                          "no metadata registers "
3110                                          "to support flag action");
3111        if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3112                return rte_flow_error_set(error, ENOTSUP,
3113                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3114                                          "extended metadata register"
3115                                          " isn't available");
3116        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3117        if (ret < 0)
3118                return ret;
3119        MLX5_ASSERT(ret > 0);
3120        if (action_flags & MLX5_FLOW_ACTION_MARK)
3121                return rte_flow_error_set(error, EINVAL,
3122                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3123                                          "can't mark and flag in same flow");
3124        if (action_flags & MLX5_FLOW_ACTION_FLAG)
3125                return rte_flow_error_set(error, EINVAL,
3126                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3127                                          "can't have 2 flag"
3128                                          " actions in same flow");
3129        return 0;
3130}
3131
3132/**
3133 * Validate MARK action.
3134 *
3135 * @param[in] dev
3136 *   Pointer to the rte_eth_dev structure.
3137 * @param[in] action
3138 *   Pointer to action.
3139 * @param[in] action_flags
3140 *   Holds the actions detected until now.
3141 * @param[in] attr
3142 *   Pointer to flow attributes
3143 * @param[out] error
3144 *   Pointer to error structure.
3145 *
3146 * @return
3147 *   0 on success, a negative errno value otherwise and rte_errno is set.
3148 */
3149static int
3150flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3151                             const struct rte_flow_action *action,
3152                             uint64_t action_flags,
3153                             const struct rte_flow_attr *attr,
3154                             struct rte_flow_error *error)
3155{
3156        struct mlx5_priv *priv = dev->data->dev_private;
3157        struct mlx5_sh_config *config = &priv->sh->config;
3158        const struct rte_flow_action_mark *mark = action->conf;
3159        int ret;
3160
3161        if (is_tunnel_offload_active(dev))
3162                return rte_flow_error_set(error, ENOTSUP,
3163                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3164                                          "no mark action "
3165                                          "if tunnel offload active");
3166        /* Fall back if no extended metadata register support. */
3167        if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3168                return mlx5_flow_validate_action_mark(action, action_flags,
3169                                                      attr, error);
3170        /* Extensive metadata mode requires registers. */
3171        if (!mlx5_flow_ext_mreg_supported(dev))
3172                return rte_flow_error_set(error, ENOTSUP,
3173                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3174                                          "no metadata registers "
3175                                          "to support mark action");
3176        if (!priv->sh->dv_mark_mask)
3177                return rte_flow_error_set(error, ENOTSUP,
3178                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3179                                          "extended metadata register"
3180                                          " isn't available");
3181        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3182        if (ret < 0)
3183                return ret;
3184        MLX5_ASSERT(ret > 0);
3185        if (!mark)
3186                return rte_flow_error_set(error, EINVAL,
3187                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3188                                          "configuration cannot be null");
3189        if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3190                return rte_flow_error_set(error, EINVAL,
3191                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3192                                          &mark->id,
3193                                          "mark id exceeds the limit");
3194        if (action_flags & MLX5_FLOW_ACTION_FLAG)
3195                return rte_flow_error_set(error, EINVAL,
3196                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3197                                          "can't flag and mark in same flow");
3198        if (action_flags & MLX5_FLOW_ACTION_MARK)
3199                return rte_flow_error_set(error, EINVAL,
3200                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3201                                          "can't have 2 mark actions in same"
3202                                          " flow");
3203        return 0;
3204}
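
/*
 * Editorial sketch, not used by the driver: a MARK action that passes the
 * checks above -- a single mark, no flag action, id below the mark mask
 * limit. The helper name and the id value are hypothetical.
 */
static __rte_unused int
mlx5_doc_example_mark(uint16_t port_id, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        /* Must stay below MLX5_FLOW_MARK_MAX & dv_mark_mask. */
        const struct rte_flow_action_mark mark = { .id = 0xcafe };
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                /* MARK and FLAG are mutually exclusive in one flow. */
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}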
3205
3206/**
3207 * Validate SET_META action.
3208 *
3209 * @param[in] dev
3210 *   Pointer to the rte_eth_dev structure.
3211 * @param[in] action
3212 *   Pointer to the action structure.
3213 * @param[in] action_flags
3214 *   Holds the actions detected until now.
3215 * @param[in] attr
3216 *   Pointer to flow attributes
3217 * @param[out] error
3218 *   Pointer to error structure.
3219 *
3220 * @return
3221 *   0 on success, a negative errno value otherwise and rte_errno is set.
3222 */
3223static int
3224flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3225                                 const struct rte_flow_action *action,
3226                                 uint64_t action_flags __rte_unused,
3227                                 const struct rte_flow_attr *attr,
3228                                 struct rte_flow_error *error)
3229{
3230        struct mlx5_priv *priv = dev->data->dev_private;
3231        struct mlx5_sh_config *config = &priv->sh->config;
3232        const struct rte_flow_action_set_meta *conf;
3233        uint32_t nic_mask = UINT32_MAX;
3234        int reg;
3235
3236        if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3237            !mlx5_flow_ext_mreg_supported(dev))
3238                return rte_flow_error_set(error, ENOTSUP,
3239                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3240                                          "extended metadata register"
3241                                          " isn't supported");
3242        reg = flow_dv_get_metadata_reg(dev, attr, error);
3243        if (reg < 0)
3244                return reg;
3245        if (reg == REG_NON)
3246                return rte_flow_error_set(error, ENOTSUP,
3247                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3248                                          "unavailable extended metadata register");
        if (reg != REG_A && reg != REG_B)
                nic_mask = priv->sh->dv_meta_mask;
3254        if (!(action->conf))
3255                return rte_flow_error_set(error, EINVAL,
3256                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3257                                          "configuration cannot be null");
3258        conf = (const struct rte_flow_action_set_meta *)action->conf;
3259        if (!conf->mask)
3260                return rte_flow_error_set(error, EINVAL,
3261                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3262                                          "zero mask doesn't have any effect");
3263        if (conf->mask & ~nic_mask)
3264                return rte_flow_error_set(error, EINVAL,
3265                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "metadata must be within reg C0");
3267        return 0;
3268}
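
/*
 * Editorial sketch, not used by the driver: a SET_META action that passes
 * the checks above -- a non-zero mask selecting the written bits. The
 * helper name and the values are hypothetical.
 */
static __rte_unused int
mlx5_doc_example_set_meta(uint16_t port_id, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        /* A zero mask would be rejected as having no effect. */
        const struct rte_flow_action_set_meta meta = {
                .data = 0x1234,
                .mask = 0xffff,
        };
        const struct rte_flow_action_jump jump = { .group = 2 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_SET_META, .conf = &meta },
                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}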
3269
3270/**
3271 * Validate SET_TAG action.
3272 *
3273 * @param[in] dev
3274 *   Pointer to the rte_eth_dev structure.
3275 * @param[in] action
3276 *   Pointer to the action structure.
3277 * @param[in] action_flags
3278 *   Holds the actions detected until now.
3279 * @param[in] attr
3280 *   Pointer to flow attributes
3281 * @param[out] error
3282 *   Pointer to error structure.
3283 *
3284 * @return
3285 *   0 on success, a negative errno value otherwise and rte_errno is set.
3286 */
3287static int
3288flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3289                                const struct rte_flow_action *action,
3290                                uint64_t action_flags,
3291                                const struct rte_flow_attr *attr,
3292                                struct rte_flow_error *error)
3293{
3294        const struct rte_flow_action_set_tag *conf;
3295        const uint64_t terminal_action_flags =
3296                MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3297                MLX5_FLOW_ACTION_RSS;
3298        int ret;
3299
3300        if (!mlx5_flow_ext_mreg_supported(dev))
3301                return rte_flow_error_set(error, ENOTSUP,
3302                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3303                                          "extensive metadata register"
3304                                          " isn't supported");
3305        if (!(action->conf))
3306                return rte_flow_error_set(error, EINVAL,
3307                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3308                                          "configuration cannot be null");
3309        conf = (const struct rte_flow_action_set_tag *)action->conf;
3310        if (!conf->mask)
3311                return rte_flow_error_set(error, EINVAL,
3312                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3313                                          "zero mask doesn't have any effect");
3314        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3315        if (ret < 0)
3316                return ret;
3317        if (!attr->transfer && attr->ingress &&
3318            (action_flags & terminal_action_flags))
3319                return rte_flow_error_set(error, EINVAL,
3320                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3321                                          "set_tag has no effect"
3322                                          " with terminal actions");
3323        return 0;
3324}
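
/*
 * Editorial sketch, not used by the driver: SET_TAG paired with a
 * non-terminal fate (jump), so the written tag can still be matched in the
 * target group. The helper name and the values are hypothetical.
 */
static __rte_unused int
mlx5_doc_example_set_tag(uint16_t port_id, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_set_tag tag = {
                .data = 0xbeef,
                .index = 0,
                .mask = UINT32_MAX,
        };
        const struct rte_flow_action_jump jump = { .group = 2 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_SET_TAG, .conf = &tag },
                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}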
3325
3326/**
3327 * Indicates whether ASO aging is supported.
3328 *
3329 * @param[in] sh
3330 *   Pointer to shared device context structure.
3331 * @param[in] attr
3332 *   Attributes of flow that includes AGE action.
3333 *
3334 * @return
3335 *   True when ASO aging is supported, false otherwise.
3336 */
3337static inline bool
3338flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,
3339                const struct rte_flow_attr *attr)
3340{
3341        MLX5_ASSERT(sh && attr);
3342        return (sh->flow_hit_aso_en && (attr->transfer || attr->group));
3343}
3344
3345/**
3346 * Validate count action.
3347 *
3348 * @param[in] dev
3349 *   Pointer to rte_eth_dev structure.
3350 * @param[in] shared
3351 *   Indicator if action is shared.
3352 * @param[in] action_flags
3353 *   Holds the actions detected until now.
3354 * @param[in] attr
3355 *   Attributes of flow that includes this action.
3356 * @param[out] error
3357 *   Pointer to error structure.
3358 *
3359 * @return
3360 *   0 on success, a negative errno value otherwise and rte_errno is set.
3361 */
3362static int
3363flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3364                              uint64_t action_flags,
3365                              const struct rte_flow_attr *attr,
3366                              struct rte_flow_error *error)
3367{
3368        struct mlx5_priv *priv = dev->data->dev_private;
3369
3370        if (!priv->sh->cdev->config.devx)
3371                goto notsup_err;
3372        if (action_flags & MLX5_FLOW_ACTION_COUNT)
3373                return rte_flow_error_set(error, EINVAL,
3374                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3375                                          "duplicate count actions set");
3376        if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3377            !flow_hit_aso_supported(priv->sh, attr))
3378                return rte_flow_error_set(error, EINVAL,
3379                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3380                                          "old age and indirect count combination is not supported");
3381#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3382        return 0;
3383#endif
3384notsup_err:
3385        return rte_flow_error_set
3386                      (error, ENOTSUP,
3387                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3388                       NULL,
3389                       "count action not supported");
3390}
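
/*
 * Editorial sketch, not used by the driver: a single COUNT action per flow;
 * the hit counter is read back with rte_flow_query() and
 * struct rte_flow_query_count. The helper name is hypothetical.
 */
static __rte_unused struct rte_flow *
mlx5_doc_example_count(uint16_t port_id, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_count count = { .id = 0 };
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                /* Only one count action is allowed per flow. */
                { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
}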
3391
3392/**
3393 * Validate the L2 encap action.
3394 *
3395 * @param[in] dev
3396 *   Pointer to the rte_eth_dev structure.
3397 * @param[in] action_flags
3398 *   Holds the actions detected until now.
3399 * @param[in] action
3400 *   Pointer to the action structure.
3401 * @param[in] attr
3402 *   Pointer to flow attributes.
3403 * @param[out] error
3404 *   Pointer to error structure.
3405 *
3406 * @return
3407 *   0 on success, a negative errno value otherwise and rte_errno is set.
3408 */
3409static int
3410flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3411                                 uint64_t action_flags,
3412                                 const struct rte_flow_action *action,
3413                                 const struct rte_flow_attr *attr,
3414                                 struct rte_flow_error *error)
3415{
3416        const struct mlx5_priv *priv = dev->data->dev_private;
3417
3418        if (!(action->conf))
3419                return rte_flow_error_set(error, EINVAL,
3420                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
3421                                          "configuration cannot be null");
3422        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3423                return rte_flow_error_set(error, EINVAL,
3424                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3425                                          "can only have a single encap action "
3426                                          "in a flow");
3427        if (!attr->transfer && priv->representor)
3428                return rte_flow_error_set(error, ENOTSUP,
3429                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3430                                          "encap action for VF representor "
3431                                          "not supported on NIC table");
3432        return 0;
3433}
3434
3435/**
3436 * Validate a decap action.
3437 *
3438 * @param[in] dev
3439 *   Pointer to the rte_eth_dev structure.
3440 * @param[in] action_flags
3441 *   Holds the actions detected until now.
3442 * @param[in] action
3443 *   Pointer to the action structure.
3444 * @param[in] item_flags
3445 *   Holds the items detected.
3446 * @param[in] attr
3447 *   Pointer to flow attributes
3448 * @param[out] error
3449 *   Pointer to error structure.
3450 *
3451 * @return
3452 *   0 on success, a negative errno value otherwise and rte_errno is set.
3453 */
3454static int
3455flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3456                              uint64_t action_flags,
3457                              const struct rte_flow_action *action,
3458                              const uint64_t item_flags,
3459                              const struct rte_flow_attr *attr,
3460                              struct rte_flow_error *error)
3461{
3462        const struct mlx5_priv *priv = dev->data->dev_private;
3463
3464        if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable &&
3465            !priv->sh->config.decap_en)
3466                return rte_flow_error_set(error, ENOTSUP,
3467                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3468                                          "decap is not enabled");
3469        if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3470                return rte_flow_error_set(error, ENOTSUP,
3471                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3472                                          action_flags &
3473                                          MLX5_FLOW_ACTION_DECAP ? "can only "
3474                                          "have a single decap action" : "decap "
3475                                          "after encap is not supported");
3476        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3477                return rte_flow_error_set(error, EINVAL,
3478                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3479                                          "can't have decap action after"
3480                                          " modify action");
3481        if (attr->egress)
3482                return rte_flow_error_set(error, ENOTSUP,
3483                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3484                                          NULL,
3485                                          "decap action not supported for "
3486                                          "egress");
3487        if (!attr->transfer && priv->representor)
3488                return rte_flow_error_set(error, ENOTSUP,
3489                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3490                                          "decap action for VF representor "
3491                                          "not supported on NIC table");
3492        if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3493            !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3494                return rte_flow_error_set(error, ENOTSUP,
3495                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3496                                "VXLAN item should be present for VXLAN decap");
3497        return 0;
3498}
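
/*
 * Editorial sketch, not used by the driver: VXLAN decap on ingress, with
 * the VXLAN item present in the pattern as the check above requires. The
 * helper name is hypothetical.
 */
static __rte_unused int
mlx5_doc_example_vxlan_decap(uint16_t port_id, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                /* VXLAN item must be present for VXLAN decap. */
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}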
3499
const struct rte_flow_action_raw_decap empty_decap = { .data = NULL, .size = 0 };
3501
3502/**
3503 * Validate the raw encap and decap actions.
3504 *
3505 * @param[in] dev
3506 *   Pointer to the rte_eth_dev structure.
3507 * @param[in] decap
3508 *   Pointer to the decap action.
3509 * @param[in] encap
3510 *   Pointer to the encap action.
3511 * @param[in] attr
3512 *   Pointer to flow attributes
 * @param[in, out] action_flags
 *   Holds the actions detected until now.
 * @param[out] actions_n
 *   Pointer to the number of actions counter.
3517 * @param[in] action
3518 *   Pointer to the action structure.
3519 * @param[in] item_flags
3520 *   Holds the items detected.
3521 * @param[out] error
3522 *   Pointer to error structure.
3523 *
3524 * @return
3525 *   0 on success, a negative errno value otherwise and rte_errno is set.
3526 */
3527static int
3528flow_dv_validate_action_raw_encap_decap
3529        (struct rte_eth_dev *dev,
3530         const struct rte_flow_action_raw_decap *decap,
3531         const struct rte_flow_action_raw_encap *encap,
3532         const struct rte_flow_attr *attr, uint64_t *action_flags,
3533         int *actions_n, const struct rte_flow_action *action,
3534         uint64_t item_flags, struct rte_flow_error *error)
3535{
3536        const struct mlx5_priv *priv = dev->data->dev_private;
3537        int ret;
3538
3539        if (encap && (!encap->size || !encap->data))
3540                return rte_flow_error_set(error, EINVAL,
3541                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3542                                          "raw encap data cannot be empty");
3543        if (decap && encap) {
3544                if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3545                    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3546                        /* L3 encap. */
3547                        decap = NULL;
3548                else if (encap->size <=
3549                           MLX5_ENCAPSULATION_DECISION_SIZE &&
3550                           decap->size >
3551                           MLX5_ENCAPSULATION_DECISION_SIZE)
3552                        /* L3 decap. */
3553                        encap = NULL;
3554                else if (encap->size >
3555                           MLX5_ENCAPSULATION_DECISION_SIZE &&
3556                           decap->size >
3557                           MLX5_ENCAPSULATION_DECISION_SIZE)
3558                        /* 2 L2 actions: encap and decap. */
3559                        ;
3560                else
3561                        return rte_flow_error_set(error,
3562                                ENOTSUP,
3563                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "unsupported combination of "
                                "too small raw decap and too "
                                "small raw encap");
3567        }
3568        if (decap) {
3569                ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3570                                                    item_flags, attr, error);
3571                if (ret < 0)
3572                        return ret;
3573                *action_flags |= MLX5_FLOW_ACTION_DECAP;
3574                ++(*actions_n);
3575        }
3576        if (encap) {
3577                if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3578                        return rte_flow_error_set(error, ENOTSUP,
3579                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3580                                                  NULL,
3581                                                  "small raw encap size");
3582                if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3583                        return rte_flow_error_set(error, EINVAL,
3584                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3585                                                  NULL,
3586                                                  "more than one encap action");
3587                if (!attr->transfer && priv->representor)
3588                        return rte_flow_error_set
3589                                        (error, ENOTSUP,
3590                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3591                                         "encap action for VF representor "
3592                                         "not supported on NIC table");
3593                *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3594                ++(*actions_n);
3595        }
3596        return 0;
3597}
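
/*
 * Editorial sketch, not used by the driver: the raw decap + raw encap
 * combination classified above as an L3 encap -- the decap size is at most
 * MLX5_ENCAPSULATION_DECISION_SIZE while the encap size exceeds it. The
 * helper name and parameters are hypothetical; encap_hdr must point to a
 * prebuilt tunnel header longer than the decision size.
 */
static __rte_unused void
mlx5_doc_example_l3_encap(struct rte_flow_action actions[3],
                          struct rte_flow_action_raw_decap *decap,
                          struct rte_flow_action_raw_encap *encap,
                          uint8_t *encap_hdr, size_t encap_hdr_size)
{
        /* Strip the inner L2 header; its size is within the threshold. */
        decap->data = NULL;
        decap->size = sizeof(struct rte_ether_hdr);
        /* Prepend the full tunnel header (beyond the threshold). */
        encap->data = encap_hdr;
        encap->size = encap_hdr_size;
        actions[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = decap };
        actions[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = encap };
        actions[2] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_END };
}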
3598
/**
3600 * Validate the ASO CT action.
3601 *
3602 * @param[in] dev
3603 *   Pointer to the rte_eth_dev structure.
3604 * @param[in] action_flags
3605 *   Holds the actions detected until now.
3606 * @param[in] item_flags
3607 *   The items found in this flow rule.
3608 * @param[in] attr
3609 *   Pointer to flow attributes.
3610 * @param[out] error
3611 *   Pointer to error structure.
3612 *
3613 * @return
3614 *   0 on success, a negative errno value otherwise and rte_errno is set.
3615 */
3616static int
3617flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3618                               uint64_t action_flags,
3619                               uint64_t item_flags,
3620                               const struct rte_flow_attr *attr,
3621                               struct rte_flow_error *error)
3622{
3623        RTE_SET_USED(dev);
3624
3625        if (attr->group == 0 && !attr->transfer)
3626                return rte_flow_error_set(error, ENOTSUP,
3627                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3628                                          NULL,
                                          "Only supported on non-root tables");
3630        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3631                return rte_flow_error_set(error, ENOTSUP,
3632                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3633                                          "CT cannot follow a fate action");
3634        if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3635            (action_flags & MLX5_FLOW_ACTION_AGE))
3636                return rte_flow_error_set(error, EINVAL,
3637                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3638                                          "Only one ASO action is supported");
3639        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3640                return rte_flow_error_set(error, EINVAL,
3641                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3642                                          "Encap cannot exist before CT");
3643        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3644                return rte_flow_error_set(error, EINVAL,
3645                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "Not an outer TCP packet");
3647        return 0;
3648}
3649
int
flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
                             struct mlx5_list_entry *entry, void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
        struct mlx5_flow_dv_encap_decap_resource *resource;

        resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
                                entry);
        if (resource->reformat_type == ctx_resource->reformat_type &&
            resource->ft_type == ctx_resource->ft_type &&
            resource->flags == ctx_resource->flags &&
            resource->size == ctx_resource->size &&
            !memcmp((const void *)resource->buf,
                    (const void *)ctx_resource->buf,
                    resource->size))
                return 0;
        return -1;
}

struct mlx5_list_entry *
flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5dv_dr_domain *domain;
        struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
        struct mlx5_flow_dv_encap_decap_resource *resource;
        uint32_t idx;
        int ret;

        if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        /* Register new encap/decap resource. */
        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
        if (!resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate resource memory");
                return NULL;
        }
        *resource = *ctx_resource;
        resource->idx = idx;
        ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
                                                              domain, resource,
                                                              &resource->action);
        if (ret) {
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "cannot create action");
                return NULL;
        }

        return &resource->entry;
}

struct mlx5_list_entry *
flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
                             void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;
        uint32_t idx;

        cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
                                           &idx);
        if (!cache_resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate resource memory");
                return NULL;
        }
        memcpy(cache_resource, oentry, sizeof(*cache_resource));
        cache_resource->idx = idx;
        return &cache_resource->entry;
}

void
flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_dv_encap_decap_resource *res =
                                       container_of(entry, typeof(*res), entry);

        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
}

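/*
 * The callbacks above follow the generic mlx5 list/hash-list contract
 * used throughout this file: the match callback returns 0 when the
 * entry equals the reference passed in cb_ctx, the create callback
 * allocates the resource from an indexed pool and instantiates the DR
 * action, and the clone/clone_free pair duplicates and releases entries
 * for the per-lcore caches.
 */
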
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_encap_decap_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_list_entry *entry;
        union {
                struct {
                        uint32_t ft_type:8;
                        uint32_t refmt_type:8;
                        /*
                         * Header reformat actions can be shared between
                         * non-root tables. One bit to indicate non-root
                         * table or not.
                         */
                        uint32_t is_root:1;
                        uint32_t reserve:15;
                };
                uint32_t v32;
        } encap_decap_key = {
                {
                        .ft_type = resource->ft_type,
                        .refmt_type = resource->reformat_type,
                        .is_root = !!dev_flow->dv.group,
                        .reserve = 0,
                }
        };
        struct mlx5_flow_cb_ctx ctx = {
                .error = error,
                .data = resource,
        };
        struct mlx5_hlist *encaps_decaps;
        uint64_t key64;

        encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
                                "encaps_decaps",
                                MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
                                true, true, sh,
                                flow_dv_encap_decap_create_cb,
                                flow_dv_encap_decap_match_cb,
                                flow_dv_encap_decap_remove_cb,
                                flow_dv_encap_decap_clone_cb,
                                flow_dv_encap_decap_clone_free_cb,
                                error);
        if (unlikely(!encaps_decaps))
                return -rte_errno;
        resource->flags = dev_flow->dv.group ? 0 : 1;
        key64 = __rte_raw_cksum(&encap_decap_key.v32,
                                sizeof(encap_decap_key.v32), 0);
        if (resource->reformat_type !=
            MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
            resource->size)
                key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
        entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
        if (!entry)
                return -rte_errno;
        resource = container_of(entry, typeof(*resource), entry);
        dev_flow->dv.encap_decap = resource;
        dev_flow->handle->dvh.rix_encap_decap = resource->idx;
        return 0;
}

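/*
 * For illustration, the hash key computed above packs the
 * (ft_type, refmt_type, is_root) tuple into one 32-bit word and, for
 * non-decap reformats, extends the checksum over the header buffer:
 *
 *   key64 = cksum(v32);
 *   if (reformat_type != L2_TUNNEL_TO_L2 && size)
 *           key64 = cksum(buf, size, key64);
 *
 * so two flows requesting an identical encapsulation resolve to the
 * same hash list entry and share a single device action.
 */
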
/**
 * Find existing table jump resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] tbl
 *   Pointer to flow table resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_jump_tbl_resource_register
                        (struct rte_eth_dev *dev __rte_unused,
                         struct mlx5_flow_tbl_resource *tbl,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error __rte_unused)
{
        struct mlx5_flow_tbl_data_entry *tbl_data =
                container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);

        MLX5_ASSERT(tbl);
        MLX5_ASSERT(tbl_data->jump.action);
        dev_flow->handle->rix_jump = tbl_data->idx;
        dev_flow->dv.jump = &tbl_data->jump;
        return 0;
}

int
flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
                         struct mlx5_list_entry *entry, void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_port_id_action_resource *res =
                                       container_of(entry, typeof(*res), entry);

        return ref->port_id != res->port_id;
}

struct mlx5_list_entry *
flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_port_id_action_resource *resource;
        uint32_t idx;
        int ret;

        /* Register new port id action resource. */
        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
        if (!resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate port_id action memory");
                return NULL;
        }
        *resource = *ref;
        ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
                                                        ref->port_id,
                                                        &resource->action);
        if (ret) {
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create action");
                return NULL;
        }
        resource->idx = idx;
        return &resource->entry;
}

struct mlx5_list_entry *
flow_dv_port_id_clone_cb(void *tool_ctx,
                         struct mlx5_list_entry *entry __rte_unused,
                         void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_port_id_action_resource *resource;
        uint32_t idx;

        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
        if (!resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate port_id action memory");
                return NULL;
        }
        memcpy(resource, entry, sizeof(*resource));
        resource->idx = idx;
        return &resource->entry;
}

void
flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_dv_port_id_action_resource *resource =
                                  container_of(entry, typeof(*resource), entry);

        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
}

/**
 * Find existing table port ID resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Pointer to port ID action resource reference.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_port_id_action_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_port_id_action_resource *ref,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_list_entry *entry;
        struct mlx5_flow_dv_port_id_action_resource *resource;
        struct mlx5_flow_cb_ctx ctx = {
                .error = error,
                .data = ref,
        };

        entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
        if (!entry)
                return -rte_errno;
        resource = container_of(entry, typeof(*resource), entry);
        dev_flow->dv.port_id_action = resource;
        dev_flow->handle->rix_port_id_action = resource->idx;
        return 0;
}

int
flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
                           struct mlx5_list_entry *entry, void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_push_vlan_action_resource *res =
                                       container_of(entry, typeof(*res), entry);

        return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
}

struct mlx5_list_entry *
flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_push_vlan_action_resource *resource;
        struct mlx5dv_dr_domain *domain;
        uint32_t idx;
        int ret;

        /* Register new push_vlan action resource. */
        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
        if (!resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate push_vlan action memory");
                return NULL;
        }
        *resource = *ref;
        if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
                                                        &resource->action);
        if (ret) {
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create push vlan action");
                return NULL;
        }
        resource->idx = idx;
        return &resource->entry;
}

struct mlx5_list_entry *
flow_dv_push_vlan_clone_cb(void *tool_ctx,
                           struct mlx5_list_entry *entry __rte_unused,
                           void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_push_vlan_action_resource *resource;
        uint32_t idx;

        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
        if (!resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate push_vlan action memory");
                return NULL;
        }
        memcpy(resource, entry, sizeof(*resource));
        resource->idx = idx;
        return &resource->entry;
}

void
flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_dv_push_vlan_action_resource *resource =
                                  container_of(entry, typeof(*resource), entry);

        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
}

/**
 * Find existing push vlan resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Pointer to push vlan action resource reference.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_push_vlan_action_resource_register
                       (struct rte_eth_dev *dev,
                        struct mlx5_flow_dv_push_vlan_action_resource *ref,
                        struct mlx5_flow *dev_flow,
                        struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_push_vlan_action_resource *resource;
        struct mlx5_list_entry *entry;
        struct mlx5_flow_cb_ctx ctx = {
                .error = error,
                .data = ref,
        };

        entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
        if (!entry)
                return -rte_errno;
        resource = container_of(entry, typeof(*resource), entry);

        dev_flow->handle->dvh.rix_push_vlan = resource->idx;
        dev_flow->dv.push_vlan_res = resource;
        return 0;
}

/**
 * Get the header size of a specific rte_flow_item_type.
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   Size of the item header in bytes, 0 if void or irrelevant.
 */
size_t
flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
{
        size_t retval;

        switch (item_type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                retval = sizeof(struct rte_ether_hdr);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                retval = sizeof(struct rte_vlan_hdr);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                retval = sizeof(struct rte_ipv4_hdr);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                retval = sizeof(struct rte_ipv6_hdr);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                retval = sizeof(struct rte_udp_hdr);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                retval = sizeof(struct rte_tcp_hdr);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                retval = sizeof(struct rte_vxlan_hdr);
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                retval = sizeof(struct rte_gre_hdr);
                break;
        case RTE_FLOW_ITEM_TYPE_MPLS:
                retval = sizeof(struct rte_mpls_hdr);
                break;
        case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
        default:
                retval = 0;
                break;
        }
        return retval;
}

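/*
 * Illustrative usage (not part of the driver): summing the header
 * lengths of an encapsulation item chain, as flow_dv_convert_encap_data()
 * does below:
 *
 *   size_t total = 0;
 *
 *   for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
 *           total += flow_dv_get_item_hdr_len(items->type);
 *
 * For eth / ipv4 / udp / vxlan this gives 14 + 20 + 8 + 8 = 50 bytes.
 */
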
#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04

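/*
 * These defaults are the fixed header parts filled in by
 * flow_dv_convert_encap_data() when the user leaves a field zeroed:
 * 0x40 | 0x05 = 0x45 is the common IPv4 version/IHL byte (version 4,
 * five 32-bit words), 0x60000000 sets the version nibble of the IPv6
 * vtc_flow word, and 0x08000000 is a VXLAN header word with only the
 * valid-VNI (I) flag set.
 */
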
/**
 * Convert the encap action data from a list of rte_flow_item objects
 * to a raw buffer.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                           size_t *size, struct rte_flow_error *error)
{
        struct rte_ether_hdr *eth = NULL;
        struct rte_vlan_hdr *vlan = NULL;
        struct rte_ipv4_hdr *ipv4 = NULL;
        struct rte_ipv6_hdr *ipv6 = NULL;
        struct rte_udp_hdr *udp = NULL;
        struct rte_vxlan_hdr *vxlan = NULL;
        struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
        struct rte_gre_hdr *gre = NULL;
        size_t len;
        size_t temp_size = 0;

        if (!items)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "invalid empty data");
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                len = flow_dv_get_item_hdr_len(items->type);
                if (len + temp_size > MLX5_ENCAP_MAX_LEN)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "items total size is too big"
                                                  " for encap action");
                rte_memcpy((void *)&buf[temp_size], items->spec, len);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth = (struct rte_ether_hdr *)&buf[temp_size];
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan = (struct rte_vlan_hdr *)&buf[temp_size];
                        if (!eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "eth header not found");
                        if (!eth->ether_type)
                                eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        if (!ipv4->version_ihl)
                                ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
                                                    MLX5_ENCAP_IPV4_IHL_MIN;
                        if (!ipv4->time_to_live)
                                ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
                        if (!ipv6->vtc_flow)
                                ipv6->vtc_flow =
                                        RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
                        if (!ipv6->hop_limits)
                                ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp = (struct rte_udp_hdr *)&buf[temp_size];
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_UDP;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        if (!udp->dst_port)
                                udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
                        if (!vxlan->vx_flags)
                                vxlan->vx_flags =
                                        RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        if (!vxlan_gpe->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!udp->dst_port)
                                udp->dst_port =
                                        RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
                        if (!vxlan_gpe->vx_flags)
                                vxlan_gpe->vx_flags =
                                                MLX5_ENCAP_VXLAN_GPE_FLAGS;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        gre = (struct rte_gre_hdr *)&buf[temp_size];
                        if (!gre->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_GRE;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "unsupported item type");
                }
                temp_size += len;
        }
        *size = temp_size;
        return 0;
}

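/*
 * For illustration: given a vxlan_encap definition of
 * eth / ipv4 / udp / vxlan, the loop above copies the four item specs
 * back to back into buf[], fills in the omitted defaults (ether_type
 * 0x0800, next_proto_id IPPROTO_UDP, UDP destination port 4789 and the
 * VXLAN I flag) and returns *size = 50, ready for the packet reformat
 * action.
 */
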
static int
flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
{
        struct rte_ether_hdr *eth = NULL;
        struct rte_vlan_hdr *vlan = NULL;
        struct rte_ipv6_hdr *ipv6 = NULL;
        struct rte_udp_hdr *udp = NULL;
        char *next_hdr;
        uint16_t proto;

        eth = (struct rte_ether_hdr *)data;
        next_hdr = (char *)(eth + 1);
        proto = RTE_BE16(eth->ether_type);

        /* VLAN skipping. */
        while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
                vlan = (struct rte_vlan_hdr *)next_hdr;
                proto = RTE_BE16(vlan->eth_proto);
                next_hdr += sizeof(struct rte_vlan_hdr);
        }

        /* HW calculates the IPv4 csum. No need to proceed. */
        if (proto == RTE_ETHER_TYPE_IPV4)
                return 0;

        /* Non IPv4/IPv6 header. Not supported. */
        if (proto != RTE_ETHER_TYPE_IPV6) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "Cannot offload non IPv4/IPv6");
        }

        ipv6 = (struct rte_ipv6_hdr *)next_hdr;

        /* Ignore non UDP. */
        if (ipv6->proto != IPPROTO_UDP)
                return 0;

        udp = (struct rte_udp_hdr *)(ipv6 + 1);
        udp->dgram_cksum = 0;

        return 0;
}

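/*
 * Zeroing the UDP checksum above matches RFC 6935, which allows a zero
 * UDP checksum for tunnel encapsulations over IPv6; IPv4 packets are
 * skipped because the hardware recomputes the IPv4 checksum itself.
 */
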
/**
 * Convert L2 encap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is an E-Switch flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
                               const struct rte_flow_action *action,
                               struct mlx5_flow *dev_flow,
                               uint8_t transfer,
                               struct rte_flow_error *error)
{
        const struct rte_flow_item *encap_data;
        const struct rte_flow_action_raw_encap *raw_encap_data;
        struct mlx5_flow_dv_encap_decap_resource res = {
                .reformat_type =
                        MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
                .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
                                      MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
        };

        if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
                raw_encap_data =
                        (const struct rte_flow_action_raw_encap *)action->conf;
                res.size = raw_encap_data->size;
                memcpy(res.buf, raw_encap_data->data, res.size);
        } else {
                if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
                        encap_data =
                                ((const struct rte_flow_action_vxlan_encap *)
                                                action->conf)->definition;
                else
                        encap_data =
                                ((const struct rte_flow_action_nvgre_encap *)
                                                action->conf)->definition;
                if (flow_dv_convert_encap_data(encap_data, res.buf,
                                               &res.size, error))
                        return -rte_errno;
        }
        if (flow_dv_zero_encap_udp_csum(res.buf, error))
                return -rte_errno;
        if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "can't create L2 encap action");
        return 0;
}

/**
 * Convert L2 decap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is an E-Switch flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
                               struct mlx5_flow *dev_flow,
                               uint8_t transfer,
                               struct rte_flow_error *error)
{
        struct mlx5_flow_dv_encap_decap_resource res = {
                .size = 0,
                .reformat_type =
                        MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
                .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
                                      MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
        };

        if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "can't create L2 decap action");
        return 0;
}

/**
 * Convert raw decap/encap (L3 tunnel) action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct mlx5_flow *dev_flow,
                                const struct rte_flow_attr *attr,
                                struct rte_flow_error *error)
{
        const struct rte_flow_action_raw_encap *encap_data;
        struct mlx5_flow_dv_encap_decap_resource res;

        memset(&res, 0, sizeof(res));
        encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
        res.size = encap_data->size;
        memcpy(res.buf, encap_data->data, res.size);
        res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
                MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
                MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
        if (attr->transfer)
                res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
        else
                res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
                                             MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
        if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "can't create encap action");
        return 0;
}

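/*
 * Illustrative note: the reformat type above is inferred from the raw
 * buffer length. A buffer shorter than MLX5_ENCAPSULATION_DECISION_SIZE
 * can only hold the new L2 header to put on a decapsulated packet
 * (L3_TUNNEL_TO_L2), while a longer buffer is assumed to carry a
 * complete L3 tunnel encapsulation (L2_TO_L3_TUNNEL).
 */
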
/**
 * Create action push VLAN.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] vlan
 *   Pointer to the VLAN to push onto the Ethernet header.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
                                const struct rte_flow_attr *attr,
                                const struct rte_vlan_hdr *vlan,
                                struct mlx5_flow *dev_flow,
                                struct rte_flow_error *error)
{
        struct mlx5_flow_dv_push_vlan_action_resource res;

        memset(&res, 0, sizeof(res));
        res.vlan_tag =
                rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
                                 vlan->vlan_tci);
        if (attr->transfer)
                res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
        else
                res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
                                             MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
        return flow_dv_push_vlan_action_resource_register
                                            (dev, &res, dev_flow, error);
}

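/*
 * Worked example: pushing an 802.1Q tag (eth_proto 0x8100) with PCP 3
 * and VLAN ID 100 gives TCI = (3 << 13) | 100 = 0x6064, hence
 * vlan_tag = rte_cpu_to_be_32(0x8100 << 16 | 0x6064), i.e. the
 * big-endian TPID/TCI layout used on the wire.
 */
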
/**
 * Validate the modify-header actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
                                   const struct rte_flow_action *action,
                                   struct rte_flow_error *error)
{
        if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "action configuration not set");
        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have encap action before"
                                          " modify action");
        return 0;
}

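/*
 * For illustration: the encap check above rejects sequences such as
 * "actions: vxlan_encap / set_ipv4_src ..." because once the packet is
 * encapsulated the original headers can no longer be rewritten; any
 * modify-header action must precede the encap.
 */
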
/**
 * Validate the modify-header MAC address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_mac(const uint64_t action_flags,
                                   const struct rte_flow_action *action,
                                   const uint64_t item_flags,
                                   struct rte_flow_error *error)
{
        int ret = 0;

        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
        if (!ret) {
                if (!(item_flags & MLX5_FLOW_LAYER_L2))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "no L2 item in pattern");
        }
        return ret;
}

/**
 * Validate the modify-header IPv4 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
                                    const struct rte_flow_action *action,
                                    const uint64_t item_flags,
                                    struct rte_flow_error *error)
{
        int ret = 0;
        uint64_t layer;

        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
        if (!ret) {
                layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
                                 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                if (!(item_flags & layer))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "no IPv4 item in pattern");
        }
        return ret;
}

/**
 * Validate the modify-header IPv6 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
                                    const struct rte_flow_action *action,
                                    const uint64_t item_flags,
                                    struct rte_flow_error *error)
{
        int ret = 0;
        uint64_t layer;

        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
        if (!ret) {
                layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
                                 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                if (!(item_flags & layer))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "no IPv6 item in pattern");
        }
        return ret;
}

/**
 * Validate the modify-header TP actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_tp(const uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const uint64_t item_flags,
                                  struct rte_flow_error *error)
{
        int ret = 0;
        uint64_t layer;

        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
        if (!ret) {
                layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
                                 MLX5_FLOW_LAYER_INNER_L4 :
                                 MLX5_FLOW_LAYER_OUTER_L4;
                if (!(item_flags & layer))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "no transport layer "
                                                  "in pattern");
        }
        return ret;
}

/**
 * Validate the modify-header actions of increment/decrement
 * TCP Sequence-number.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
                                       const struct rte_flow_action *action,
                                       const uint64_t item_flags,
                                       struct rte_flow_error *error)
{
        int ret = 0;
        uint64_t layer;

        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
        if (!ret) {
                layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
                                 MLX5_FLOW_LAYER_INNER_L4_TCP :
                                 MLX5_FLOW_LAYER_OUTER_L4_TCP;
                if (!(item_flags & layer))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "no TCP item in"
                                                  " pattern");
                if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
                        (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
                    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
                        (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "cannot decrease and increase"
                                                  " TCP sequence number"
                                                  " at the same time");
        }
        return ret;
}

/**
 * Validate the modify-header actions of increment/decrement
 * TCP Acknowledgment number.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
                                       const struct rte_flow_action *action,
                                       const uint64_t item_flags,
                                       struct rte_flow_error *error)
{
        int ret = 0;
        uint64_t layer;

        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
        if (!ret) {
                layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
                                 MLX5_FLOW_LAYER_INNER_L4_TCP :
                                 MLX5_FLOW_LAYER_OUTER_L4_TCP;
                if (!(item_flags & layer))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "no TCP item in"
                                                  " pattern");
                if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
                        (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
                    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
                        (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "cannot decrease and increase"
                                                  " TCP acknowledgment number"
                                                  " at the same time");
        }
        return ret;
}

/**
 * Validate the modify-header TTL actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
                                   const struct rte_flow_action *action,
                                   const uint64_t item_flags,
                                   struct rte_flow_error *error)
{
        int ret = 0;
        uint64_t layer;

        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
        if (!ret) {
                layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
                                 MLX5_FLOW_LAYER_INNER_L3 :
                                 MLX5_FLOW_LAYER_OUTER_L3;
                if (!(item_flags & layer))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "no IP protocol in pattern");
        }
        return ret;
}

4865/**
4866 * Validate the generic modify field actions.
4867 * @param[in] dev
4868 *   Pointer to the rte_eth_dev structure.
4869 * @param[in] action_flags
4870 *   Holds the actions detected until now.
4871 * @param[in] action
4872 *   Pointer to the modify action.
4873 * @param[in] attr
4874 *   Pointer to the flow attributes.
4875 * @param[out] error
4876 *   Pointer to error structure.
4877 *
4878 * @return
4879 *   Number of header fields to modify (0 or more) on success,
4880 *   a negative errno value otherwise and rte_errno is set.
4881 */
4882static int
4883flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4884                                   const uint64_t action_flags,
4885                                   const struct rte_flow_action *action,
4886                                   const struct rte_flow_attr *attr,
4887                                   struct rte_flow_error *error)
4888{
4889        int ret = 0;
4890        struct mlx5_priv *priv = dev->data->dev_private;
4891        struct mlx5_sh_config *config = &priv->sh->config;
4892        struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
4893        const struct rte_flow_action_modify_field *action_modify_field =
4894                action->conf;
4895        uint32_t dst_width = mlx5_flow_item_field_width(dev,
4896                                action_modify_field->dst.field,
4897                                -1, attr, error);
4898        uint32_t src_width = mlx5_flow_item_field_width(dev,
4899                                action_modify_field->src.field,
4900                                dst_width, attr, error);
4901
4902        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4903        if (ret)
4904                return ret;
4905
4906        if (action_modify_field->width == 0)
4907                return rte_flow_error_set(error, EINVAL,
4908                                RTE_FLOW_ERROR_TYPE_ACTION, action,
4909                                "no bits are requested to be modified");
4910        else if (action_modify_field->width > dst_width ||
4911                 action_modify_field->width > src_width)
4912                return rte_flow_error_set(error, EINVAL,
4913                                RTE_FLOW_ERROR_TYPE_ACTION, action,
4914                                "cannot modify more bits than"
4915                                " the width of a field");
4916        if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4917            action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4918                if ((action_modify_field->dst.offset +
4919                     action_modify_field->width > dst_width) ||
4920                    (action_modify_field->dst.offset % 32))
4921                        return rte_flow_error_set(error, EINVAL,
4922                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
4923                                        "destination offset is too big"
4924                                        " or not aligned to 4 bytes");
4925                if (action_modify_field->dst.level &&
4926                    action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4927                        return rte_flow_error_set(error, ENOTSUP,
4928                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
4929                                        "inner header fields modification"
4930                                        " is not supported");
4931        }
4932        if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4933            action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4934                if (!attr->transfer && !attr->group)
4935                        return rte_flow_error_set(error, ENOTSUP,
4936                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
4937                                        "modify field action is not"
4938                                        " supported for group 0");
4939                if ((action_modify_field->src.offset +
4940                     action_modify_field->width > src_width) ||
4941                    (action_modify_field->src.offset % 32))
4942                        return rte_flow_error_set(error, EINVAL,
4943                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
4944                                        "source offset is too big"
4945                                        " or not aligned to 4 bytes");
4946                if (action_modify_field->src.level &&
4947                    action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4948                        return rte_flow_error_set(error, ENOTSUP,
4949                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
4950                                        "inner header fields modification"
4951                                        " is not supported");
4952        }
4953        if ((action_modify_field->dst.field ==
4954             action_modify_field->src.field) &&
4955            (action_modify_field->dst.level ==
4956             action_modify_field->src.level))
4957                return rte_flow_error_set(error, EINVAL,
4958                                RTE_FLOW_ERROR_TYPE_ACTION, action,
4959                                "source and destination fields"
4960                                " cannot be the same");
4961        if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4962            action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4963            action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4964                return rte_flow_error_set(error, EINVAL,
4965                                RTE_FLOW_ERROR_TYPE_ACTION, action,
4966                                "mark, immediate value or a pointer to it"
4967                                " cannot be used as a destination");
4968        if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4969            action_modify_field->src.field == RTE_FLOW_FIELD_START)
4970                return rte_flow_error_set(error, ENOTSUP,
4971                                RTE_FLOW_ERROR_TYPE_ACTION, action,
4972                                "modification of an arbitrary"
4973                                " place in a packet is not supported");
4974        if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4975            action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4976                return rte_flow_error_set(error, ENOTSUP,
4977                                RTE_FLOW_ERROR_TYPE_ACTION, action,
4978                                "modification of the 802.1Q Tag"
4979                                " Identifier is not supported");
4980        if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4981            action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4982                return rte_flow_error_set(error, ENOTSUP,
4983                                RTE_FLOW_ERROR_TYPE_ACTION, action,
4984                                "modification of the VXLAN Network"
4985                                " Identifier is not supported");
4986        if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4987            action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4988                return rte_flow_error_set(error, ENOTSUP,
4989                                RTE_FLOW_ERROR_TYPE_ACTION, action,
4990                                "modification of the GENEVE Network"
4991                                " Identifier is not supported");
4992        if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4993            action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
4994                if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4995                    !mlx5_flow_ext_mreg_supported(dev))
4996                        return rte_flow_error_set(error, ENOTSUP,
4997                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
4998                                        "cannot modify mark in legacy mode"
4999                                        " or without extensive registers");
5000        if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
5001            action_modify_field->src.field == RTE_FLOW_FIELD_META) {
5002                if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
5003                    !mlx5_flow_ext_mreg_supported(dev))
5004                        return rte_flow_error_set(error, ENOTSUP,
5005                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
5006                                        "cannot modify meta without"
5007                                        " extensive registers support");
5008                ret = flow_dv_get_metadata_reg(dev, attr, error);
5009                if (ret < 0 || ret == REG_NON)
5010                        return rte_flow_error_set(error, ENOTSUP,
5011                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
5012                                        "cannot modify meta without"
5013                                        " extensive registers available");
5014        }
5015        if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
5016                return rte_flow_error_set(error, ENOTSUP,
5017                                RTE_FLOW_ERROR_TYPE_ACTION, action,
5018                                "add and sub operations"
5019                                " are not supported");
5020        if (action_modify_field->dst.field == RTE_FLOW_FIELD_IPV4_ECN ||
5021            action_modify_field->src.field == RTE_FLOW_FIELD_IPV4_ECN ||
5022            action_modify_field->dst.field == RTE_FLOW_FIELD_IPV6_ECN ||
5023            action_modify_field->src.field == RTE_FLOW_FIELD_IPV6_ECN)
5024                if (!hca_attr->modify_outer_ip_ecn &&
5025                    !attr->transfer && !attr->group)
5026                        return rte_flow_error_set(error, ENOTSUP,
5027                                RTE_FLOW_ERROR_TYPE_ACTION, action,
5028                                "modification of the ECN is not supported by current firmware");
5029        return (action_modify_field->width / 32) +
5030               !!(action_modify_field->width % 32);
5031}
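
/*
 * Worked example of the return value (illustrative): the action
 * consumes one 32-bit modification command per started dword,
 * i.e. ceil(width / 32):
 *
 *   width = 24 -> (24 / 32) + !!(24 % 32) = 0 + 1 = 1 command
 *   width = 48 -> (48 / 32) + !!(48 % 32) = 1 + 1 = 2 commands
 *   width = 64 -> (64 / 32) + !!(64 % 32) = 2 + 0 = 2 commands
 */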
5032
5033/**
5034 * Validate jump action.
5035 *
5036 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, or NULL if none.
 * @param[in] action
5037 *   Pointer to the jump action.
5038 * @param[in] action_flags
5039 *   Holds the actions detected until now.
5040 * @param[in] attributes
5041 *   Pointer to flow attributes.
5042 * @param[in] external
5043 *   Action belongs to a flow rule created by a request external to the PMD.
5044 * @param[out] error
5045 *   Pointer to error structure.
5046 *
5047 * @return
5048 *   0 on success, a negative errno value otherwise and rte_errno is set.
5049 */
5050static int
5051flow_dv_validate_action_jump(struct rte_eth_dev *dev,
5052                             const struct mlx5_flow_tunnel *tunnel,
5053                             const struct rte_flow_action *action,
5054                             uint64_t action_flags,
5055                             const struct rte_flow_attr *attributes,
5056                             bool external, struct rte_flow_error *error)
5057{
5058        uint32_t target_group, table = 0;
5059        int ret = 0;
5060        struct flow_grp_info grp_info = {
5061                .external = !!external,
5062                .transfer = !!attributes->transfer,
5063                .fdb_def_rule = 1,
5064                .std_tbl_fix = 0
5065        };
5066        if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5067                            MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5068                return rte_flow_error_set(error, EINVAL,
5069                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5070                                          "can't have 2 fate actions in"
5071                                          " the same flow");
5072        if (!action->conf)
5073                return rte_flow_error_set(error, EINVAL,
5074                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5075                                          NULL, "action configuration not set");
5076        target_group =
5077                ((const struct rte_flow_action_jump *)action->conf)->group;
5078        ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
5079                                       &grp_info, error);
5080        if (ret)
5081                return ret;
5082        if (attributes->group == target_group &&
5083            !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
5084                              MLX5_FLOW_ACTION_TUNNEL_MATCH)))
5085                return rte_flow_error_set(error, EINVAL,
5086                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5087                                          "target group must be other than"
5088                                          " the current flow group");
5089        if (table == 0)
5090                return rte_flow_error_set(error, EINVAL,
5091                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5092                                          NULL, "root table shouldn't be destination");
5093        return 0;
5094}
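
/*
 * Caller-side sketch (illustrative, values are hypothetical): a JUMP
 * action passing the validation above must carry a conf and target a
 * group other than the flow's own group.
 *
 *   struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *   struct rte_flow_action_jump jump = { .group = 1 };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */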
5095
5096/**
5097 * Validate action PORT_ID / REPRESENTED_PORT.
5098 *
5099 * @param[in] dev
5100 *   Pointer to rte_eth_dev structure.
5101 * @param[in] action_flags
5102 *   Bit-fields that holds the actions detected until now.
5103 * @param[in] action
5104 *   PORT_ID / REPRESENTED_PORT action structure.
5105 * @param[in] attr
5106 *   Attributes of flow that includes this action.
5107 * @param[out] error
5108 *   Pointer to error structure.
5109 *
5110 * @return
5111 *   0 on success, a negative errno value otherwise and rte_errno is set.
5112 */
5113static int
5114flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5115                                uint64_t action_flags,
5116                                const struct rte_flow_action *action,
5117                                const struct rte_flow_attr *attr,
5118                                struct rte_flow_error *error)
5119{
5120        const struct rte_flow_action_port_id *port_id;
5121        const struct rte_flow_action_ethdev *ethdev;
5122        struct mlx5_priv *act_priv;
5123        struct mlx5_priv *dev_priv;
5124        uint16_t port;
5125
5126        if (!attr->transfer)
5127                return rte_flow_error_set(error, ENOTSUP,
5128                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5129                                          NULL,
5130                                          "port action is valid in transfer"
5131                                          " mode only");
5132        if (!action || !action->conf)
5133                return rte_flow_error_set(error, ENOTSUP,
5134                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5135                                          NULL,
5136                                          "port action parameters must be"
5137                                          " specified");
5138        if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5139                            MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5140                return rte_flow_error_set(error, EINVAL,
5141                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5142                                          "can have only one fate action in"
5143                                          " a flow");
5144        dev_priv = mlx5_dev_to_eswitch_info(dev);
5145        if (!dev_priv)
5146                return rte_flow_error_set(error, rte_errno,
5147                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5148                                          NULL,
5149                                          "failed to obtain E-Switch info");
5150        switch (action->type) {
5151        case RTE_FLOW_ACTION_TYPE_PORT_ID:
5152                port_id = action->conf;
5153                port = port_id->original ? dev->data->port_id : port_id->id;
5154                break;
5155        case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5156                ethdev = action->conf;
5157                port = ethdev->port_id;
5158                break;
5159        default:
5160                MLX5_ASSERT(false);
5161                return rte_flow_error_set
5162                                (error, EINVAL,
5163                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
5164                                 "unknown E-Switch action");
5165        }
5166        act_priv = mlx5_port_to_eswitch_info(port, false);
5167        if (!act_priv)
5168                return rte_flow_error_set
5169                                (error, rte_errno,
5170                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5171                                 "failed to obtain E-Switch port id for port");
5172        if (act_priv->domain_id != dev_priv->domain_id)
5173                return rte_flow_error_set
5174                                (error, EINVAL,
5175                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5176                                 "port does not belong to"
5177                                 " E-Switch being configured");
5178        return 0;
5179}
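
/*
 * Illustrative conf for the represented-port form accepted above
 * (hypothetical port number): the resolved E-Switch port must share
 * the domain of the device being configured.
 *
 *   struct rte_flow_action_ethdev ethdev = { .port_id = 1 };
 *   struct rte_flow_action act = {
 *       .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
 *       .conf = &ethdev,
 *   };
 */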
5180
5181/**
5182 * Get the maximum number of modify header actions.
5183 *
5184 * @param dev
5185 *   Pointer to rte_eth_dev structure.
5186 * @param root
5187 *   Whether the action is on the root table.
5188 *
5189 * @return
5190 *   Max number of modify header actions the device can support.
5191 */
5192static inline unsigned int
5193flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5194                              bool root)
5195{
5196        /*
5197         * There is no way to directly query the max capacity from the FW.
5198         * The defined maximum per table type is assumed to be supported.
5199         */
5200        if (!root)
5201                return MLX5_MAX_MODIFY_NUM;
5202        else
5203                return MLX5_ROOT_TBL_MODIFY_NUM;
5204}
5205
5206/**
5207 * Validate the meter action.
5208 *
5209 * @param[in] dev
5210 *   Pointer to rte_eth_dev structure.
5211 * @param[in] action_flags
5212 *   Bit-fields that holds the actions detected until now.
5213 * @param[in] item_flags
5214 *   Holds the items detected.
5215 * @param[in] action
5216 *   Pointer to the meter action.
5217 * @param[in] attr
5218 *   Attributes of flow that includes this action.
5219 * @param[in] port_id_item
5220 *   Pointer to item indicating port id.
 * @param[out] def_policy
 *   Whether the meter uses the default policy.
5221 * @param[out] error
5222 *   Pointer to error structure.
5223 *
5224 * @return
5225 *   0 on success, a negative errno value otherwise and rte_errno is set.
5226 */
5227static int
5228mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5229                                uint64_t action_flags, uint64_t item_flags,
5230                                const struct rte_flow_action *action,
5231                                const struct rte_flow_attr *attr,
5232                                const struct rte_flow_item *port_id_item,
5233                                bool *def_policy,
5234                                struct rte_flow_error *error)
5235{
5236        struct mlx5_priv *priv = dev->data->dev_private;
5237        const struct rte_flow_action_meter *am = action->conf;
5238        struct mlx5_flow_meter_info *fm;
5239        struct mlx5_flow_meter_policy *mtr_policy;
5240        struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5241
5242        if (!am)
5243                return rte_flow_error_set(error, EINVAL,
5244                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5245                                          "meter action conf is NULL");
5246
5247        if (action_flags & MLX5_FLOW_ACTION_METER)
5248                return rte_flow_error_set(error, ENOTSUP,
5249                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5250                                          "meter chaining not supported");
5251        if (action_flags & MLX5_FLOW_ACTION_JUMP)
5252                return rte_flow_error_set(error, ENOTSUP,
5253                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5254                                          "meter with jump not supported");
5255        if (!priv->mtr_en)
5256                return rte_flow_error_set(error, ENOTSUP,
5257                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5258                                          NULL,
5259                                          "meter action not supported");
5260        fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5261        if (!fm)
5262                return rte_flow_error_set(error, EINVAL,
5263                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5264                                          "Meter not found");
5265        /* ASO meter can always be shared by different domains. */
5266        if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5267            !(fm->transfer == attr->transfer ||
5268              (!fm->ingress && !attr->ingress && attr->egress) ||
5269              (!fm->egress && !attr->egress && attr->ingress)))
5270                return rte_flow_error_set(error, EINVAL,
5271                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5272                        "Flow attribute domains are either invalid "
5273                        "or conflict with the current "
5274                        "meter attributes");
5275        if (fm->def_policy) {
5276                if (!((attr->transfer &&
5277                        mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5278                        (attr->egress &&
5279                        mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5280                        (attr->ingress &&
5281                        mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5282                        return rte_flow_error_set(error, EINVAL,
5283                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5284                                          "Flow attribute domains "
5285                                          "conflict with the current "
5286                                          "meter domain attributes");
5287                *def_policy = true;
5288        } else {
5289                mtr_policy = mlx5_flow_meter_policy_find(dev,
5290                                                fm->policy_id, NULL);
5291                if (!mtr_policy)
5292                        return rte_flow_error_set(error, EINVAL,
5293                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5294                                          "Invalid policy id for meter");
5295                if (!((attr->transfer && mtr_policy->transfer) ||
5296                        (attr->egress && mtr_policy->egress) ||
5297                        (attr->ingress && mtr_policy->ingress)))
5298                        return rte_flow_error_set(error, EINVAL,
5299                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5300                                          "Flow attribute domains "
5301                                          "conflict with the current "
5302                                          "meter domain attributes");
5303                if (attr->transfer && mtr_policy->dev) {
5304                        /*
5305                         * When the policy has a port_id fate action,
5306                         * the flow should have the same src port as the policy.
5307                         */
5308                        struct mlx5_priv *policy_port_priv =
5309                                        mtr_policy->dev->data->dev_private;
5310                        uint16_t flow_src_port = priv->representor_id;
5311
5312                        if (port_id_item) {
5313                                if (mlx5_flow_get_item_vport_id(dev, port_id_item,
5314                                                                &flow_src_port, error))
5315                                        return -rte_errno;
5316                        }
5317                        if (flow_src_port != policy_port_priv->representor_id)
5318                                return rte_flow_error_set(error,
5319                                                rte_errno,
5320                                                RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5321                                                NULL,
5322                                                "Flow and meter policy "
5323                                                "have different src ports.");
5324                } else if (mtr_policy->is_rss) {
5325                        struct mlx5_flow_meter_policy *fp;
5326                        struct mlx5_meter_policy_action_container *acg;
5327                        struct mlx5_meter_policy_action_container *acy;
5328                        const struct rte_flow_action *rss_act;
5329                        int ret;
5330
5331                        fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
5332                                                                mtr_policy);
5333                        if (fp == NULL)
5334                                return rte_flow_error_set(error, EINVAL,
5335                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5336                                                  "Unable to get the final "
5337                                                  "policy in the hierarchy");
5338                        acg = &fp->act_cnt[RTE_COLOR_GREEN];
5339                        acy = &fp->act_cnt[RTE_COLOR_YELLOW];
5340                        MLX5_ASSERT(acg->fate_action ==
5341                                    MLX5_FLOW_FATE_SHARED_RSS ||
5342                                    acy->fate_action ==
5343                                    MLX5_FLOW_FATE_SHARED_RSS);
5344                        if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
5345                                rss_act = acg->rss;
5346                        else
5347                                rss_act = acy->rss;
5348                        ret = mlx5_flow_validate_action_rss(rss_act,
5349                                        action_flags, dev, attr,
5350                                        item_flags, error);
5351                        if (ret)
5352                                return ret;
5353                }
5354                *def_policy = false;
5355        }
5356        return 0;
5357}
5358
5359/**
5360 * Validate the age action.
5361 *
5362 * @param[in] action_flags
5363 *   Holds the actions detected until now.
5364 * @param[in] action
5365 *   Pointer to the age action.
5366 * @param[in] dev
5367 *   Pointer to the Ethernet device structure.
5368 * @param[out] error
5369 *   Pointer to error structure.
5370 *
5371 * @return
5372 *   0 on success, a negative errno value otherwise and rte_errno is set.
5373 */
5374static int
5375flow_dv_validate_action_age(uint64_t action_flags,
5376                            const struct rte_flow_action *action,
5377                            struct rte_eth_dev *dev,
5378                            struct rte_flow_error *error)
5379{
5380        struct mlx5_priv *priv = dev->data->dev_private;
5381        const struct rte_flow_action_age *age = action->conf;
5382
5383        if (!priv->sh->cdev->config.devx ||
5384            (priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng))
5385                return rte_flow_error_set(error, ENOTSUP,
5386                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5387                                          NULL,
5388                                          "age action not supported");
5389        if (!(action->conf))
5390                return rte_flow_error_set(error, EINVAL,
5391                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
5392                                          "configuration cannot be NULL");
5393        if (!(age->timeout))
5394                return rte_flow_error_set(error, EINVAL,
5395                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
5396                                          "invalid timeout value 0");
5397        if (action_flags & MLX5_FLOW_ACTION_AGE)
5398                return rte_flow_error_set(error, EINVAL,
5399                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5400                                          "duplicate age actions set");
5401        return 0;
5402}
5403
5404/**
5405 * Validate the modify-header IPv4 DSCP actions.
5406 *
5407 * @param[in] action_flags
5408 *   Holds the actions detected until now.
5409 * @param[in] action
5410 *   Pointer to the modify action.
5411 * @param[in] item_flags
5412 *   Holds the items detected.
5413 * @param[out] error
5414 *   Pointer to error structure.
5415 *
5416 * @return
5417 *   0 on success, a negative errno value otherwise and rte_errno is set.
5418 */
5419static int
5420flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5421                                         const struct rte_flow_action *action,
5422                                         const uint64_t item_flags,
5423                                         struct rte_flow_error *error)
5424{
5425        int ret = 0;
5426
5427        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5428        if (!ret) {
5429                if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5430                        return rte_flow_error_set(error, EINVAL,
5431                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5432                                                  NULL,
5433                                                  "no ipv4 item in pattern");
5434        }
5435        return ret;
5436}
5437
5438/**
5439 * Validate the modify-header IPv6 DSCP actions.
5440 *
5441 * @param[in] action_flags
5442 *   Holds the actions detected until now.
5443 * @param[in] action
5444 *   Pointer to the modify action.
5445 * @param[in] item_flags
5446 *   Holds the items detected.
5447 * @param[out] error
5448 *   Pointer to error structure.
5449 *
5450 * @return
5451 *   0 on success, a negative errno value otherwise and rte_errno is set.
5452 */
5453static int
5454flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5455                                         const struct rte_flow_action *action,
5456                                         const uint64_t item_flags,
5457                                         struct rte_flow_error *error)
5458{
5459        int ret = 0;
5460
5461        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5462        if (!ret) {
5463                if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5464                        return rte_flow_error_set(error, EINVAL,
5465                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5466                                                  NULL,
5467                                                  "no ipv6 item in pattern");
5468        }
5469        return ret;
5470}
5471
5472int
5473flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5474                        struct mlx5_list_entry *entry, void *cb_ctx)
5475{
5476        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5477        struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5478        struct mlx5_flow_dv_modify_hdr_resource *resource =
5479                                  container_of(entry, typeof(*resource), entry);
5480        uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5481
5482        key_len += ref->actions_num * sizeof(ref->actions[0]);
5483        return ref->actions_num != resource->actions_num ||
5484               memcmp(&ref->ft_type, &resource->ft_type, key_len);
5485}
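
/*
 * Sketch of the byte range compared above (layout per
 * struct mlx5_flow_dv_modify_hdr_resource; widths illustrative):
 *
 *   | entry | ... | ft_type ........ | actions[0..actions_num-1] |
 *                 ^---------------- key_len bytes --------------^
 *
 * The key starts at ft_type and covers the fixed tail of the struct
 * plus only the used part of the flexible actions array.
 */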
5486
5487static struct mlx5_indexed_pool *
5488flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5489{
5490        struct mlx5_indexed_pool *ipool = __atomic_load_n
5491                                     (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5492
5493        if (!ipool) {
5494                struct mlx5_indexed_pool *expected = NULL;
5495                struct mlx5_indexed_pool_config cfg =
5496                    (struct mlx5_indexed_pool_config) {
5497                       .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5498                                                                   (index + 1) *
5499                                           sizeof(struct mlx5_modification_cmd),
5500                       .trunk_size = 64,
5501                       .grow_trunk = 3,
5502                       .grow_shift = 2,
5503                       .need_lock = 1,
5504                       .release_mem_en = !!sh->config.reclaim_mode,
5505                       .per_core_cache =
5506                                       sh->config.reclaim_mode ? 0 : (1 << 16),
5507                       .malloc = mlx5_malloc,
5508                       .free = mlx5_free,
5509                       .type = "mlx5_modify_action_resource",
5510                };
5511
5512                cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5513                ipool = mlx5_ipool_create(&cfg);
5514                if (!ipool)
5515                        return NULL;
5516                if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5517                                                 &expected, ipool, false,
5518                                                 __ATOMIC_SEQ_CST,
5519                                                 __ATOMIC_SEQ_CST)) {
5520                        mlx5_ipool_destroy(ipool);
5521                        ipool = __atomic_load_n(&sh->mdh_ipools[index],
5522                                                __ATOMIC_SEQ_CST);
5523                }
5524        }
5525        return ipool;
5526}
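
/*
 * The function above is a lock-free lazy-init: racing threads may each
 * create an ipool, but only one compare-and-swap publishes it; losers
 * destroy their copy and reload the winner. Generic sketch of the
 * pattern (hypothetical names):
 *
 *   obj = atomic_load(&slot);
 *   if (!obj) {
 *       new = create();
 *       if (!atomic_cas(&slot, NULL, new)) {
 *           destroy(new);                  // lost the race
 *           obj = atomic_load(&slot);      // use the winner's object
 *       } else {
 *           obj = new;
 *       }
 *   }
 */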
5527
5528struct mlx5_list_entry *
5529flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5530{
5531        struct mlx5_dev_ctx_shared *sh = tool_ctx;
5532        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5533        struct mlx5dv_dr_domain *ns;
5534        struct mlx5_flow_dv_modify_hdr_resource *entry;
5535        struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5536        struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5537                                                          ref->actions_num - 1);
5538        int ret;
5539        uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5540        uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5541        uint32_t idx;
5542
5543        if (unlikely(!ipool)) {
5544                rte_flow_error_set(ctx->error, ENOMEM,
5545                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5546                                   NULL, "cannot allocate modify ipool");
5547                return NULL;
5548        }
5549        entry = mlx5_ipool_zmalloc(ipool, &idx);
5550        if (!entry) {
5551                rte_flow_error_set(ctx->error, ENOMEM,
5552                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5553                                   "cannot allocate resource memory");
5554                return NULL;
5555        }
5556        rte_memcpy(&entry->ft_type,
5557                   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5558                   key_len + data_len);
5559        if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5560                ns = sh->fdb_domain;
5561        else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5562                ns = sh->tx_domain;
5563        else
5564                ns = sh->rx_domain;
5565        ret = mlx5_flow_os_create_flow_action_modify_header
5566                                        (sh->cdev->ctx, ns, entry,
5567                                         data_len, &entry->action);
5568        if (ret) {
5569                mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5570                rte_flow_error_set(ctx->error, ENOMEM,
5571                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5572                                   NULL, "cannot create modification action");
5573                return NULL;
5574        }
5575        entry->idx = idx;
5576        return &entry->entry;
5577}
5578
5579struct mlx5_list_entry *
5580flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5581                        void *cb_ctx)
5582{
5583        struct mlx5_dev_ctx_shared *sh = tool_ctx;
5584        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5585        struct mlx5_flow_dv_modify_hdr_resource *entry;
5586        struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5587        uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5588        uint32_t idx;
5589
5590        entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5591                                  &idx);
5592        if (!entry) {
5593                rte_flow_error_set(ctx->error, ENOMEM,
5594                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5595                                   "cannot allocate resource memory");
5596                return NULL;
5597        }
5598        memcpy(entry, oentry, sizeof(*entry) + data_len);
5599        entry->idx = idx;
5600        return &entry->entry;
5601}
5602
5603void
5604flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5605{
5606        struct mlx5_dev_ctx_shared *sh = tool_ctx;
5607        struct mlx5_flow_dv_modify_hdr_resource *res =
5608                container_of(entry, typeof(*res), entry);
5609
5610        mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5611}
5612
5613/**
5614 * Validate the sample action.
5615 *
5616 * @param[in, out] action_flags
5617 *   Holds the actions detected until now.
5618 * @param[in] action
5619 *   Pointer to the sample action.
5620 * @param[in] dev
5621 *   Pointer to the Ethernet device structure.
5622 * @param[in] attr
5623 *   Attributes of flow that includes this action.
5624 * @param[in] item_flags
5625 *   Holds the items detected.
5626 * @param[in] rss
5627 *   Pointer to the RSS action.
5628 * @param[out] sample_rss
5629 *   Pointer to the RSS action in sample action list.
5630 * @param[out] count
5631 *   Pointer to the COUNT action in sample action list.
5632 * @param[out] fdb_mirror_limit
5633 *   Pointer to the FDB mirror limitation flag.
5634 * @param[out] error
5635 *   Pointer to error structure.
5636 *
5637 * @return
5638 *   0 on success, a negative errno value otherwise and rte_errno is set.
5639 */
5640static int
5641flow_dv_validate_action_sample(uint64_t *action_flags,
5642                               const struct rte_flow_action *action,
5643                               struct rte_eth_dev *dev,
5644                               const struct rte_flow_attr *attr,
5645                               uint64_t item_flags,
5646                               const struct rte_flow_action_rss *rss,
5647                               const struct rte_flow_action_rss **sample_rss,
5648                               const struct rte_flow_action_count **count,
5649                               int *fdb_mirror_limit,
5650                               struct rte_flow_error *error)
5651{
5652        struct mlx5_priv *priv = dev->data->dev_private;
5653        struct mlx5_sh_config *dev_conf = &priv->sh->config;
5654        const struct rte_flow_action_sample *sample = action->conf;
5655        const struct rte_flow_action *act;
5656        uint64_t sub_action_flags = 0;
5657        uint16_t queue_index = 0xFFFF;
5658        int actions_n = 0;
5659        int ret;
5660
5661        if (!sample)
5662                return rte_flow_error_set(error, EINVAL,
5663                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
5664                                          "configuration cannot be NULL");
5665        if (sample->ratio == 0)
5666                return rte_flow_error_set(error, EINVAL,
5667                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
5668                                          "ratio value starts from 1");
5669        if (!priv->sh->cdev->config.devx ||
5670            (sample->ratio > 0 && !priv->sampler_en))
5671                return rte_flow_error_set(error, ENOTSUP,
5672                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5673                                          NULL,
5674                                          "sample action not supported");
5675        if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5676                return rte_flow_error_set(error, EINVAL,
5677                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5678                                          "Multiple sample actions not "
5679                                          "supported");
5680        if (*action_flags & MLX5_FLOW_ACTION_METER)
5681                return rte_flow_error_set(error, EINVAL,
5682                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
5683                                          "wrong action order, meter should "
5684                                          "be after sample action");
5685        if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5686                return rte_flow_error_set(error, EINVAL,
5687                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
5688                                          "wrong action order, jump should "
5689                                          "be after sample action");
5690        if (*action_flags & MLX5_FLOW_ACTION_CT)
5691                return rte_flow_error_set(error, EINVAL,
5692                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
5693                                          "Sample after CT not supported");
5694        act = sample->actions;
5695        for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5696                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5697                        return rte_flow_error_set(error, ENOTSUP,
5698                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5699                                                  act, "too many actions");
5700                switch (act->type) {
5701                case RTE_FLOW_ACTION_TYPE_QUEUE:
5702                        ret = mlx5_flow_validate_action_queue(act,
5703                                                              sub_action_flags,
5704                                                              dev,
5705                                                              attr, error);
5706                        if (ret < 0)
5707                                return ret;
5708                        queue_index = ((const struct rte_flow_action_queue *)
5709                                                        (act->conf))->index;
5710                        sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5711                        ++actions_n;
5712                        break;
5713                case RTE_FLOW_ACTION_TYPE_RSS:
5714                        *sample_rss = act->conf;
5715                        ret = mlx5_flow_validate_action_rss(act,
5716                                                            sub_action_flags,
5717                                                            dev, attr,
5718                                                            item_flags,
5719                                                            error);
5720                        if (ret < 0)
5721                                return ret;
5722                        if (rss && *sample_rss &&
5723                            ((*sample_rss)->level != rss->level ||
5724                            (*sample_rss)->types != rss->types))
5725                                return rte_flow_error_set(error, ENOTSUP,
5726                                        RTE_FLOW_ERROR_TYPE_ACTION,
5727                                        NULL,
5728                                        "Can't use different RSS types "
5729                                        "or levels in the same flow");
5730                        if (*sample_rss != NULL && (*sample_rss)->queue_num)
5731                                queue_index = (*sample_rss)->queue[0];
5732                        sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5733                        ++actions_n;
5734                        break;
5735                case RTE_FLOW_ACTION_TYPE_MARK:
5736                        ret = flow_dv_validate_action_mark(dev, act,
5737                                                           sub_action_flags,
5738                                                           attr, error);
5739                        if (ret < 0)
5740                                return ret;
5741                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5742                                sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5743                                                MLX5_FLOW_ACTION_MARK_EXT;
5744                        else
5745                                sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5746                        ++actions_n;
5747                        break;
5748                case RTE_FLOW_ACTION_TYPE_COUNT:
5749                        ret = flow_dv_validate_action_count
5750                                (dev, false, *action_flags | sub_action_flags,
5751                                 attr, error);
5752                        if (ret < 0)
5753                                return ret;
5754                        *count = act->conf;
5755                        sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5756                        *action_flags |= MLX5_FLOW_ACTION_COUNT;
5757                        ++actions_n;
5758                        break;
5759                case RTE_FLOW_ACTION_TYPE_PORT_ID:
5760                case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5761                        ret = flow_dv_validate_action_port_id(dev,
5762                                                              sub_action_flags,
5763                                                              act,
5764                                                              attr,
5765                                                              error);
5766                        if (ret)
5767                                return ret;
5768                        sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5769                        ++actions_n;
5770                        break;
5771                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5772                        ret = flow_dv_validate_action_raw_encap_decap
5773                                (dev, NULL, act->conf, attr, &sub_action_flags,
5774                                 &actions_n, action, item_flags, error);
5775                        if (ret < 0)
5776                                return ret;
5777                        ++actions_n;
5778                        break;
5779                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5780                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5781                        ret = flow_dv_validate_action_l2_encap(dev,
5782                                                               sub_action_flags,
5783                                                               act, attr,
5784                                                               error);
5785                        if (ret < 0)
5786                                return ret;
5787                        sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5788                        ++actions_n;
5789                        break;
5790                default:
5791                        return rte_flow_error_set(error, ENOTSUP,
5792                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5793                                                  NULL,
5794                                                  "Unsupported optional "
5795                                                  "action");
5796                }
5797        }
5798        if (attr->ingress && !attr->transfer) {
5799                if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5800                                          MLX5_FLOW_ACTION_RSS)))
5801                        return rte_flow_error_set(error, EINVAL,
5802                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5803                                                  NULL,
5804                                                  "Ingress must have a dest "
5805                                                  "QUEUE for Sample");
5806        } else if (attr->egress && !attr->transfer) {
5807                return rte_flow_error_set(error, ENOTSUP,
5808                                          RTE_FLOW_ERROR_TYPE_ACTION,
5809                                          NULL,
5810                                          "Sample only supports ingress "
5811                                          "or E-Switch");
5812        } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5813                MLX5_ASSERT(attr->transfer);
5814                if (sample->ratio > 1)
5815                        return rte_flow_error_set(error, ENOTSUP,
5816                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5817                                                  NULL,
5818                                                  "E-Switch doesn't support "
5819                                                  "any optional action "
5820                                                  "for sampling");
5821                if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5822                        return rte_flow_error_set(error, ENOTSUP,
5823                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5824                                                  NULL,
5825                                                  "unsupported action QUEUE");
5826                if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5827                        return rte_flow_error_set(error, ENOTSUP,
5828                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5829                                                  NULL,
5830                                                  "unsupported action RSS");
5831                if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5832                        return rte_flow_error_set(error, EINVAL,
5833                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5834                                                  NULL,
5835                                                  "E-Switch must have a dest "
5836                                                  "port for mirroring");
5837                if (!priv->sh->cdev->config.hca_attr.reg_c_preserve &&
5838                     priv->representor_id != UINT16_MAX)
5839                        *fdb_mirror_limit = 1;
5840        }
5841        /* Continue validation for Xcap actions. */
5842        if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5843            (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index))) {
5844                if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5845                     MLX5_FLOW_XCAP_ACTIONS)
5846                        return rte_flow_error_set(error, ENOTSUP,
5847                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5848                                                  NULL, "encap and decap "
5849                                                  "combination isn't "
5850                                                  "supported");
5851                if (!attr->transfer && attr->ingress && (sub_action_flags &
5852                                                        MLX5_FLOW_ACTION_ENCAP))
5853                        return rte_flow_error_set(error, ENOTSUP,
5854                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5855                                                  NULL, "encap is not supported"
5856                                                  " for ingress traffic");
5857        }
5858        return 0;
5859}
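
/*
 * Illustrative NIC-ingress sample conf (hypothetical values) that
 * satisfies the checks above: ratio >= 1 and a QUEUE (or RSS) fate in
 * the sub-action list.
 *
 *   struct rte_flow_action_queue q = { .index = 0 };
 *   struct rte_flow_action sub[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_action_sample sample = { .ratio = 2, .actions = sub };
 */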
5860
5861/**
5862 * Find existing modify-header resource or create and register a new one.
5863 *
5864 * @param[in, out] dev
5865 *   Pointer to rte_eth_dev structure.
5866 * @param[in, out] resource
5867 *   Pointer to modify-header resource.
5868 * @param[in, out] dev_flow
5869 *   Pointer to the dev_flow.
5870 * @param[out] error
5871 *   Pointer to error structure.
5872 *
5873 * @return
5874 *   0 on success, otherwise -errno and rte_errno is set.
5875 */
5876static int
5877flow_dv_modify_hdr_resource_register
5878                        (struct rte_eth_dev *dev,
5879                         struct mlx5_flow_dv_modify_hdr_resource *resource,
5880                         struct mlx5_flow *dev_flow,
5881                         struct rte_flow_error *error)
5882{
5883        struct mlx5_priv *priv = dev->data->dev_private;
5884        struct mlx5_dev_ctx_shared *sh = priv->sh;
5885        uint32_t key_len = sizeof(*resource) -
5886                           offsetof(typeof(*resource), ft_type) +
5887                           resource->actions_num * sizeof(resource->actions[0]);
5888        struct mlx5_list_entry *entry;
5889        struct mlx5_flow_cb_ctx ctx = {
5890                .error = error,
5891                .data = resource,
5892        };
5893        struct mlx5_hlist *modify_cmds;
5894        uint64_t key64;
5895
5896        modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5897                                "hdr_modify",
5898                                MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5899                                true, false, sh,
5900                                flow_dv_modify_create_cb,
5901                                flow_dv_modify_match_cb,
5902                                flow_dv_modify_remove_cb,
5903                                flow_dv_modify_clone_cb,
5904                                flow_dv_modify_clone_free_cb,
5905                                error);
5906        if (unlikely(!modify_cmds))
5907                return -rte_errno;
5908        resource->root = !dev_flow->dv.group;
5909        if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5910                                                                resource->root))
5911                return rte_flow_error_set(error, EOVERFLOW,
5912                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5913                                          "too many modify header items");
5914        key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5915        entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5916        if (!entry)
5917                return -rte_errno;
5918        resource = container_of(entry, typeof(*resource), entry);
5919        dev_flow->handle->dvh.modify_hdr = resource;
5920        return 0;
5921}
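
/*
 * Dedup sketch (illustrative): two flows requesting an identical
 * modify-header command list hash to the same key and end up sharing
 * one resource, e.g.
 *
 *   k1 = __rte_raw_cksum(&res_a->ft_type, key_len, 0);
 *   k2 = __rte_raw_cksum(&res_b->ft_type, key_len, 0);
 *   // identical byte ranges => k1 == k2 => same hlist entry reused
 */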
5922
5923/**
5924 * Get DV flow counter by index.
5925 *
5926 * @param[in] dev
5927 *   Pointer to the Ethernet device structure.
5928 * @param[in] idx
5929 *   mlx5 flow counter index in the container.
5930 * @param[out] ppool
5931 *   mlx5 flow counter pool in the container.
5932 *
5933 * @return
5934 *   Pointer to the counter, NULL otherwise.
5935 */
5936static struct mlx5_flow_counter *
5937flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5938                           uint32_t idx,
5939                           struct mlx5_flow_counter_pool **ppool)
5940{
5941        struct mlx5_priv *priv = dev->data->dev_private;
5942        struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5943        struct mlx5_flow_counter_pool *pool;
5944
5945        /* Decrease to original index and clear shared bit. */
5946        idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5947        MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5948        pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5949        MLX5_ASSERT(pool);
5950        if (ppool)
5951                *ppool = pool;
5952        return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5953}
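
/*
 * Worked example of the decoding above (MLX5_COUNTERS_PER_POOL assumed
 * to be 512 for illustration):
 *
 *   idx_in = 1537 (shared bit clear)
 *   idx    = (1537 - 1) & (MLX5_CNT_SHARED_OFFSET - 1) = 1536
 *   pool   = cmng->pools[1536 / 512] = cmng->pools[3]
 *   cnt    = MLX5_POOL_GET_CNT(pool, 1536 % 512) = counter 0 of pool 3
 */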
5954
5955/**
5956 * Check whether the devx counter belongs to the pool.
5957 *
5958 * @param[in] pool
5959 *   Pointer to the counter pool.
5960 * @param[in] id
5961 *   The counter devx ID.
5962 *
5963 * @return
5964 *   True if counter belongs to the pool, false otherwise.
5965 */
5966static bool
5967flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5968{
5969        int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5970                   MLX5_COUNTERS_PER_POOL;
5971
5972        if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5973                return true;
5974        return false;
5975}
5976
5977/**
5978 * Get a pool by devx counter ID.
5979 *
5980 * @param[in] cmng
5981 *   Pointer to the counter management.
5982 * @param[in] id
5983 *   The counter devx ID.
5984 *
5985 * @return
5986 *   The counter pool pointer if it exists, NULL otherwise.
5987 */
5988static struct mlx5_flow_counter_pool *
5989flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5990{
5991        uint32_t i;
5992        struct mlx5_flow_counter_pool *pool = NULL;
5993
5994        rte_spinlock_lock(&cmng->pool_update_sl);
5995        /* Check last used pool. */
5996        if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5997            flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5998                pool = cmng->pools[cmng->last_pool_idx];
5999                goto out;
6000        }
6001        /* ID out of range means no suitable pool in the container. */
6002        if (id > cmng->max_id || id < cmng->min_id)
6003                goto out;
6004        /*
6005         * Search the pools from the end of the container, since counter
6006         * IDs are mostly sequentially increasing, so the last pool is
6007         * usually the needed one.
6008         */
6009        i = cmng->n_valid;
6010        while (i--) {
6011                struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
6012
6013                if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
6014                        pool = pool_tmp;
6015                        break;
6016                }
6017        }
6018out:
6019        rte_spinlock_unlock(&cmng->pool_update_sl);
6020        return pool;
6021}
6022
6023/**
6024 * Resize a counter container.
6025 *
6026 * @param[in] dev
6027 *   Pointer to the Ethernet device structure.
6028 *
6029 * @return
6030 *   0 on success, otherwise negative errno value and rte_errno is set.
6031 */
6032static int
6033flow_dv_container_resize(struct rte_eth_dev *dev)
6034{
6035        struct mlx5_priv *priv = dev->data->dev_private;
6036        struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6037        void *old_pools = cmng->pools;
6038        uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
6039        uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
6040        void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6041
6042        if (!pools) {
6043                rte_errno = ENOMEM;
6044                return -ENOMEM;
6045        }
6046        if (old_pools)
6047                memcpy(pools, old_pools, cmng->n *
6048                                       sizeof(struct mlx5_flow_counter_pool *));
6049        cmng->n = resize;
6050        cmng->pools = pools;
6051        if (old_pools)
6052                mlx5_free(old_pools);
6053        return 0;
6054}
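
/*
 * The resize above grows the pointer array by a fixed chunk instead of
 * doubling; minimal generic sketch of the same pattern (hypothetical
 * names):
 *
 *   new = calloc(n + CHUNK, sizeof(*new));
 *   if (!new)
 *       return -ENOMEM;
 *   memcpy(new, old, n * sizeof(*new));
 *   arr = new;
 *   n += CHUNK;
 *   free(old);
 */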
6055
6056/**
6057 * Query a devx flow counter.
6058 *
6059 * @param[in] dev
6060 *   Pointer to the Ethernet device structure.
6061 * @param[in] counter
6062 *   Index to the flow counter.
6063 * @param[out] pkts
6064 *   The statistics value of packets.
6065 * @param[out] bytes
6066 *   The statistics value of bytes.
6067 *
6068 * @return
6069 *   0 on success, otherwise a negative errno value and rte_errno is set.
6070 */
6071static inline int
6072_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
6073                     uint64_t *bytes)
6074{
6075        struct mlx5_priv *priv = dev->data->dev_private;
6076        struct mlx5_flow_counter_pool *pool = NULL;
6077        struct mlx5_flow_counter *cnt;
6078        int offset;
6079
6080        cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6081        MLX5_ASSERT(pool);
6082        if (priv->sh->cmng.counter_fallback)
6083                return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
6084                                        0, pkts, bytes, 0, NULL, NULL, 0);
6085        rte_spinlock_lock(&pool->sl);
6086        if (!pool->raw) {
6087                *pkts = 0;
6088                *bytes = 0;
6089        } else {
6090                offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
6091                *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
6092                *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
6093        }
6094        rte_spinlock_unlock(&pool->sl);
6095        return 0;
6096}
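
/*
 * Caller-side sketch (illustrative): fetch the statistics of a counter
 * by its index.
 *
 *   uint64_t pkts, bytes;
 *
 *   if (!_flow_dv_query_count(dev, counter, &pkts, &bytes))
 *       DRV_LOG(DEBUG, "hits=%" PRIu64 " bytes=%" PRIu64, pkts, bytes);
 */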
6097
6098/**
6099 * Create and initialize a new counter pool.
6100 *
6101 * @param[in] dev
6102 *   Pointer to the Ethernet device structure.
6103 * @param[out] dcs
6104 *   The devX counter handle.
6105 * @param[in] age
6106 *   Whether the pool is for counters that were allocated for aging.
6109 *
6110 * @return
6111 *   The pool container pointer on success, NULL otherwise and rte_errno is set.
6112 */
6113static struct mlx5_flow_counter_pool *
6114flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6115                    uint32_t age)
6116{
6117        struct mlx5_priv *priv = dev->data->dev_private;
6118        struct mlx5_flow_counter_pool *pool;
6119        struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6120        bool fallback = priv->sh->cmng.counter_fallback;
6121        uint32_t size = sizeof(*pool);
6122
6123        size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6124        size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6125        pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6126        if (!pool) {
6127                rte_errno = ENOMEM;
6128                return NULL;
6129        }
6130        pool->raw = NULL;
6131        pool->is_aged = !!age;
6132        pool->query_gen = 0;
6133        pool->min_dcs = dcs;
6134        rte_spinlock_init(&pool->sl);
6135        rte_spinlock_init(&pool->csl);
6136        TAILQ_INIT(&pool->counters[0]);
6137        TAILQ_INIT(&pool->counters[1]);
6138        pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6139        rte_spinlock_lock(&cmng->pool_update_sl);
6140        pool->index = cmng->n_valid;
6141        if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6142                mlx5_free(pool);
6143                rte_spinlock_unlock(&cmng->pool_update_sl);
6144                return NULL;
6145        }
6146        cmng->pools[pool->index] = pool;
6147        cmng->n_valid++;
6148        if (unlikely(fallback)) {
6149                int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6150
6151                if (base < cmng->min_id)
6152                        cmng->min_id = base;
6153                if (base > cmng->max_id)
6154                        cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6155                cmng->last_pool_idx = pool->index;
6156        }
6157        rte_spinlock_unlock(&cmng->pool_update_sl);
6158        return pool;
6159}
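
/*
 * Memory layout implied by the size computation in flow_dv_pool_create()
 * (a sketch; the age parameter area exists only for aging pools):
 *
 *   +-----------------+-----------------------------+-------------------+
 *   | pool header     | MLX5_COUNTERS_PER_POOL      | optional per-cnt  |
 *   | (struct above)  | counters of MLX5_CNT_SIZE   | MLX5_AGE_SIZE     |
 *   +-----------------+-----------------------------+-------------------+
 */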
6160
6161/**
6162 * Prepare a new counter and/or a new counter pool.
6163 *
6164 * @param[in] dev
6165 *   Pointer to the Ethernet device structure.
6166 * @param[out] cnt_free
6167 *   Where to put the pointer of a new counter.
6168 * @param[in] age
 *   Whether the pool is for counters allocated for aging.
6170 *
6171 * @return
6172 *   The counter pool pointer and @p cnt_free is set on success,
6173 *   NULL otherwise and rte_errno is set.
6174 */
6175static struct mlx5_flow_counter_pool *
6176flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6177                             struct mlx5_flow_counter **cnt_free,
6178                             uint32_t age)
6179{
6180        struct mlx5_priv *priv = dev->data->dev_private;
6181        struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6182        struct mlx5_flow_counter_pool *pool;
6183        struct mlx5_counters tmp_tq;
6184        struct mlx5_devx_obj *dcs = NULL;
6185        struct mlx5_flow_counter *cnt;
6186        enum mlx5_counter_type cnt_type =
6187                        age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6188        bool fallback = priv->sh->cmng.counter_fallback;
6189        uint32_t i;
6190
6191        if (fallback) {
6192                /* bulk_bitmap must be 0 for single counter allocation. */
6193                dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
6194                if (!dcs)
6195                        return NULL;
6196                pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6197                if (!pool) {
6198                        pool = flow_dv_pool_create(dev, dcs, age);
6199                        if (!pool) {
6200                                mlx5_devx_cmd_destroy(dcs);
6201                                return NULL;
6202                        }
6203                }
6204                i = dcs->id % MLX5_COUNTERS_PER_POOL;
6205                cnt = MLX5_POOL_GET_CNT(pool, i);
6206                cnt->pool = pool;
6207                cnt->dcs_when_free = dcs;
6208                *cnt_free = cnt;
6209                return pool;
6210        }
6211        dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
6212        if (!dcs) {
6213                rte_errno = ENODATA;
6214                return NULL;
6215        }
6216        pool = flow_dv_pool_create(dev, dcs, age);
6217        if (!pool) {
6218                mlx5_devx_cmd_destroy(dcs);
6219                return NULL;
6220        }
6221        TAILQ_INIT(&tmp_tq);
6222        for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6223                cnt = MLX5_POOL_GET_CNT(pool, i);
6224                cnt->pool = pool;
6225                TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6226        }
6227        rte_spinlock_lock(&cmng->csl[cnt_type]);
6228        TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6229        rte_spinlock_unlock(&cmng->csl[cnt_type]);
6230        *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6231        (*cnt_free)->pool = pool;
6232        return pool;
6233}
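
/*
 * A note on the seeding above: counter 0 of a freshly created pool is
 * handed straight back through @p cnt_free, while counters 1..N-1 are first
 * linked into a temporary local list and then spliced into the global free
 * list with TAILQ_CONCAT(), an O(1) operation, so the list lock is taken
 * once per pool rather than once per counter.
 */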
6234
6235/**
6236 * Allocate a flow counter.
6237 *
6238 * @param[in] dev
6239 *   Pointer to the Ethernet device structure.
6240 * @param[in] age
6241 *   Whether the counter was allocated for aging.
6242 *
6243 * @return
6244 *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6245 */
6246static uint32_t
6247flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6248{
6249        struct mlx5_priv *priv = dev->data->dev_private;
6250        struct mlx5_flow_counter_pool *pool = NULL;
6251        struct mlx5_flow_counter *cnt_free = NULL;
6252        bool fallback = priv->sh->cmng.counter_fallback;
6253        struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6254        enum mlx5_counter_type cnt_type =
6255                        age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6256        uint32_t cnt_idx;
6257
6258        if (!priv->sh->cdev->config.devx) {
6259                rte_errno = ENOTSUP;
6260                return 0;
6261        }
6262        /* Get free counters from container. */
6263        rte_spinlock_lock(&cmng->csl[cnt_type]);
6264        cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6265        if (cnt_free)
6266                TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6267        rte_spinlock_unlock(&cmng->csl[cnt_type]);
6268        if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6269                goto err;
6270        pool = cnt_free->pool;
6271        if (fallback)
6272                cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only on first-time usage. */
6274        if (!cnt_free->action) {
6275                uint16_t offset;
6276                struct mlx5_devx_obj *dcs;
6277                int ret;
6278
6279                if (!fallback) {
6280                        offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6281                        dcs = pool->min_dcs;
6282                } else {
6283                        offset = 0;
6284                        dcs = cnt_free->dcs_when_free;
6285                }
6286                ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6287                                                            &cnt_free->action);
6288                if (ret) {
6289                        rte_errno = errno;
6290                        goto err;
6291                }
6292        }
6293        cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6294                                MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6295        /* Update the counter reset values. */
6296        if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6297                                 &cnt_free->bytes))
6298                goto err;
6299        if (!fallback && !priv->sh->cmng.query_thread_on)
6300                /* Start the asynchronous batch query by the host thread. */
6301                mlx5_set_query_alarm(priv->sh);
6302        /*
	 * When the count action is not shared (by ID), the shared_info field
	 * is used for the indirect action API's refcnt.
	 * When the counter action is shared neither by ID nor by the
	 * indirect action API, the refcnt must be 1.
6307         */
6308        cnt_free->shared_info.refcnt = 1;
6309        return cnt_idx;
6310err:
6311        if (cnt_free) {
6312                cnt_free->pool = pool;
6313                if (fallback)
6314                        cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6315                rte_spinlock_lock(&cmng->csl[cnt_type]);
6316                TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6317                rte_spinlock_unlock(&cmng->csl[cnt_type]);
6318        }
6319        return 0;
6320}
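
/*
 * A usage sketch (dev is assumed to be a started mlx5 port). Returned
 * indices are 1-based, 0 meaning failure; the decoding below mirrors the
 * (counter - 1) % MLX5_COUNTERS_PER_POOL arithmetic used by
 * flow_dv_counter_idx_get_age(), assuming MLX5_MAKE_CNT_IDX() packs
 * (pool index, in-pool offset) with a +1 bias.
 */
#if 0 /* illustrative sketch, not compiled */
	uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);

	if (cnt_idx) {
		uint32_t pool_idx = (cnt_idx - 1) / MLX5_COUNTERS_PER_POOL;
		uint32_t offset = (cnt_idx - 1) % MLX5_COUNTERS_PER_POOL;

		/* ... attach the counter action to a flow, query, etc. ... */
		flow_dv_counter_free(dev, cnt_idx);
	}
#endif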
6321
6322/**
6323 * Get age param from counter index.
6324 *
6325 * @param[in] dev
6326 *   Pointer to the Ethernet device structure.
6327 * @param[in] counter
 *   Index to the counter handle.
6329 *
6330 * @return
6331 *   The aging parameter specified for the counter index.
6332 */
6333static struct mlx5_age_param*
6334flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6335                                uint32_t counter)
6336{
6337        struct mlx5_flow_counter *cnt;
6338        struct mlx5_flow_counter_pool *pool = NULL;
6339
6340        flow_dv_counter_get_by_idx(dev, counter, &pool);
6341        counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6342        cnt = MLX5_POOL_GET_CNT(pool, counter);
6343        return MLX5_CNT_TO_AGE(cnt);
6344}
6345
6346/**
6347 * Remove a flow counter from aged counter list.
6348 *
6349 * @param[in] dev
6350 *   Pointer to the Ethernet device structure.
6351 * @param[in] counter
 *   Index to the counter handle.
 * @param[in] cnt
 *   Pointer to the counter handle.
6355 */
6356static void
6357flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6358                                uint32_t counter, struct mlx5_flow_counter *cnt)
6359{
6360        struct mlx5_age_info *age_info;
6361        struct mlx5_age_param *age_param;
6362        struct mlx5_priv *priv = dev->data->dev_private;
6363        uint16_t expected = AGE_CANDIDATE;
6364
6365        age_info = GET_PORT_AGE_INFO(priv);
6366        age_param = flow_dv_counter_idx_get_age(dev, counter);
6367        if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6368                                         AGE_FREE, false, __ATOMIC_RELAXED,
6369                                         __ATOMIC_RELAXED)) {
		/*
		 * We need the lock even on age timeout,
		 * since the counter may still be in use.
		 */
6374                rte_spinlock_lock(&age_info->aged_sl);
6375                TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6376                rte_spinlock_unlock(&age_info->aged_sl);
6377                __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6378        }
6379}
6380
6381/**
6382 * Release a flow counter.
6383 *
6384 * @param[in] dev
6385 *   Pointer to the Ethernet device structure.
6386 * @param[in] counter
 *   Index to the counter handle.
6388 */
6389static void
6390flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6391{
6392        struct mlx5_priv *priv = dev->data->dev_private;
6393        struct mlx5_flow_counter_pool *pool = NULL;
6394        struct mlx5_flow_counter *cnt;
6395        enum mlx5_counter_type cnt_type;
6396
6397        if (!counter)
6398                return;
6399        cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6400        MLX5_ASSERT(pool);
6401        if (pool->is_aged) {
6402                flow_dv_counter_remove_from_age(dev, counter, cnt);
6403        } else {
6404                /*
		 * If the counter action is shared by the indirect action API,
		 * the atomic function decrements its reference counter.
		 * If the action is still referenced after the decrement, the
		 * function returns here and does not release it.
		 * When the counter action is not shared by the indirect
		 * action API, the refcnt is 1 before the decrement, so the
		 * condition fails and the function does not return here.
6412                 */
6413                if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6414                                       __ATOMIC_RELAXED))
6415                        return;
6416        }
6417        cnt->pool = pool;
6418        /*
	 * Put the counter back to the list to be updated in non-fallback
	 * mode. Two lists are used alternately: while one is being queried,
	 * freed counters are added to the other, selected by the pool
	 * query_gen value. After a query finishes, its list is appended to
	 * the global container counter list. The lists are switched when a
	 * query starts, so the query callback and this release function
	 * operate on different lists.
6426         */
6427        if (!priv->sh->cmng.counter_fallback) {
6428                rte_spinlock_lock(&pool->csl);
6429                TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6430                rte_spinlock_unlock(&pool->csl);
6431        } else {
6432                cnt->dcs_when_free = cnt->dcs_when_active;
6433                cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6434                                           MLX5_COUNTER_TYPE_ORIGIN;
6435                rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6436                TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6437                                  cnt, next);
6438                rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6439        }
6440}
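
/*
 * The non-fallback free path above, sketched:
 *
 *   free  -> pool->counters[pool->query_gen]  (under pool->csl)
 *   query -> cmng->counters[cnt_type]         (under cmng->csl[cnt_type])
 *
 * Freed counters first accumulate on the pool-local generation list and
 * only reach the global free list consumed by flow_dv_counter_alloc()
 * after the asynchronous batch query handler processes them.
 */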
6441
6442/**
6443 * Resize a meter id container.
6444 *
6445 * @param[in] dev
6446 *   Pointer to the Ethernet device structure.
6447 *
6448 * @return
6449 *   0 on success, otherwise negative errno value and rte_errno is set.
6450 */
6451static int
6452flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6453{
6454        struct mlx5_priv *priv = dev->data->dev_private;
6455        struct mlx5_aso_mtr_pools_mng *pools_mng =
6456                                &priv->sh->mtrmng->pools_mng;
6457        void *old_pools = pools_mng->pools;
6458        uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6459        uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6460        void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6461
6462        if (!pools) {
6463                rte_errno = ENOMEM;
6464                return -ENOMEM;
6465        }
	if (!pools_mng->n &&
	    mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
		mlx5_free(pools);
		return -ENOMEM;
	}
6471        if (old_pools)
6472                memcpy(pools, old_pools, pools_mng->n *
6473                                       sizeof(struct mlx5_aso_mtr_pool *));
6474        pools_mng->n = resize;
6475        pools_mng->pools = pools;
6476        if (old_pools)
6477                mlx5_free(old_pools);
6478        return 0;
6479}
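
/*
 * Note: unlike flow_dv_container_resize() above, the first resize of the
 * meter container (pools_mng->n still zero) also performs the lazy,
 * one-time initialization of the ASO policer queue.
 */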
6480
6481/**
6482 * Prepare a new meter and/or a new meter pool.
6483 *
6484 * @param[in] dev
6485 *   Pointer to the Ethernet device structure.
6486 * @param[out] mtr_free
 *   Where to put the pointer of a new meter.
6488 *
6489 * @return
 *   The meter pool pointer and @p mtr_free is set on success,
6491 *   NULL otherwise and rte_errno is set.
6492 */
6493static struct mlx5_aso_mtr_pool *
6494flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
6495{
6496        struct mlx5_priv *priv = dev->data->dev_private;
6497        struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
6498        struct mlx5_aso_mtr_pool *pool = NULL;
6499        struct mlx5_devx_obj *dcs = NULL;
6500        uint32_t i;
6501        uint32_t log_obj_size;
6502
6503        log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6504        dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
6505                                                      priv->sh->cdev->pdn,
6506                                                      log_obj_size);
6507        if (!dcs) {
6508                rte_errno = ENODATA;
6509                return NULL;
6510        }
6511        pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6512        if (!pool) {
6513                rte_errno = ENOMEM;
6514                claim_zero(mlx5_devx_cmd_destroy(dcs));
6515                return NULL;
6516        }
6517        pool->devx_obj = dcs;
6518        rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
6519        pool->index = pools_mng->n_valid;
6520        if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6521                mlx5_free(pool);
6522                claim_zero(mlx5_devx_cmd_destroy(dcs));
6523                rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6524                return NULL;
6525        }
6526        pools_mng->pools[pool->index] = pool;
6527        pools_mng->n_valid++;
6528        rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6529        for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6530                pool->mtrs[i].offset = i;
6531                LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6532        }
6533        pool->mtrs[0].offset = 0;
6534        *mtr_free = &pool->mtrs[0];
6535        return pool;
6536}
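
/*
 * A note on the seeding above: meters 1..MLX5_ASO_MTRS_PER_POOL-1 go to the
 * pools_mng->meters free list while meter 0 is returned directly through
 * @p mtr_free. Given a meter pointer, the owning pool and the meter index
 * can be recovered as flow_dv_mtr_alloc() does (a sketch, mtr being any
 * meter taken from the free list):
 */
#if 0 /* illustrative sketch, not compiled */
	struct mlx5_aso_mtr_pool *pool = container_of(mtr,
			struct mlx5_aso_mtr_pool, mtrs[mtr->offset]);
	uint32_t mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr->offset);
#endif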
6537
6538/**
6539 * Release a flow meter into pool.
6540 *
6541 * @param[in] dev
6542 *   Pointer to the Ethernet device structure.
6543 * @param[in] mtr_idx
 *   Index to the ASO flow meter.
6545 */
6546static void
6547flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6548{
6549        struct mlx5_priv *priv = dev->data->dev_private;
6550        struct mlx5_aso_mtr_pools_mng *pools_mng =
6551                                &priv->sh->mtrmng->pools_mng;
6552        struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6553
6554        MLX5_ASSERT(aso_mtr);
6555        rte_spinlock_lock(&pools_mng->mtrsl);
6556        memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6557        aso_mtr->state = ASO_METER_FREE;
6558        LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6559        rte_spinlock_unlock(&pools_mng->mtrsl);
6560}
6561
6562/**
 * Allocate an ASO flow meter.
6564 *
6565 * @param[in] dev
6566 *   Pointer to the Ethernet device structure.
6567 *
6568 * @return
 *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6570 */
6571static uint32_t
6572flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6573{
6574        struct mlx5_priv *priv = dev->data->dev_private;
6575        struct mlx5_aso_mtr *mtr_free = NULL;
6576        struct mlx5_aso_mtr_pools_mng *pools_mng =
6577                                &priv->sh->mtrmng->pools_mng;
6578        struct mlx5_aso_mtr_pool *pool;
6579        uint32_t mtr_idx = 0;
6580
6581        if (!priv->sh->cdev->config.devx) {
6582                rte_errno = ENOTSUP;
6583                return 0;
6584        }
	/* Get a free meter from the pool management free list. */
6587        rte_spinlock_lock(&pools_mng->mtrsl);
6588        mtr_free = LIST_FIRST(&pools_mng->meters);
6589        if (mtr_free)
6590                LIST_REMOVE(mtr_free, next);
6591        if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6592                rte_spinlock_unlock(&pools_mng->mtrsl);
6593                return 0;
6594        }
6595        mtr_free->state = ASO_METER_WAIT;
6596        rte_spinlock_unlock(&pools_mng->mtrsl);
6597        pool = container_of(mtr_free,
6598                        struct mlx5_aso_mtr_pool,
6599                        mtrs[mtr_free->offset]);
6600        mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6601        if (!mtr_free->fm.meter_action_g) {
6602#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6603                struct rte_flow_error error;
6604                uint8_t reg_id;
6605
6606                reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6607                mtr_free->fm.meter_action_g =
6608                        mlx5_glue->dv_create_flow_action_aso
6609                                                (priv->sh->rx_domain,
6610                                                 pool->devx_obj->obj,
6611                                                 mtr_free->offset,
6612                                                 (1 << MLX5_FLOW_COLOR_GREEN),
6613                                                 reg_id - REG_C_0);
6614#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6615                if (!mtr_free->fm.meter_action_g) {
6616                        flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6617                        return 0;
6618                }
6619        }
6620        return mtr_idx;
6621}
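
/*
 * A usage sketch (dev is assumed to be a DevX-enabled mlx5 port); meter
 * indices follow the same 1-based convention as counter indices, with 0
 * meaning failure:
 */
#if 0 /* illustrative sketch, not compiled */
	uint32_t mtr_idx = flow_dv_mtr_alloc(dev);

	if (!mtr_idx)
		DRV_LOG(ERR, "cannot allocate ASO meter: %s",
			rte_strerror(rte_errno));
	else
		flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
#endif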
6622
6623/**
 * Verify the @p attributes will be correctly understood by the NIC and are
 * valid for this flow rule.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, or NULL.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] grp_info
 *   Pointer to the flow group translation info.
6633 * @param[out] error
6634 *   Pointer to error structure.
6635 *
6636 * @return
 *   - 0 on success and non-root table.
6638 *   - 1 on success and root table.
6639 *   - a negative errno value otherwise and rte_errno is set.
6640 */
6641static int
6642flow_dv_validate_attributes(struct rte_eth_dev *dev,
6643                            const struct mlx5_flow_tunnel *tunnel,
6644                            const struct rte_flow_attr *attributes,
6645                            const struct flow_grp_info *grp_info,
6646                            struct rte_flow_error *error)
6647{
6648        struct mlx5_priv *priv = dev->data->dev_private;
6649        uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6650        int ret = 0;
6651
6652#ifndef HAVE_MLX5DV_DR
6653        RTE_SET_USED(tunnel);
6654        RTE_SET_USED(grp_info);
6655        if (attributes->group)
6656                return rte_flow_error_set(error, ENOTSUP,
6657                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6658                                          NULL,
6659                                          "groups are not supported");
6660#else
6661        uint32_t table = 0;
6662
6663        ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6664                                       grp_info, error);
6665        if (ret)
6666                return ret;
6667        if (!table)
6668                ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6669#endif
6670        if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6671            attributes->priority > lowest_priority)
6672                return rte_flow_error_set(error, ENOTSUP,
6673                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6674                                          NULL,
6675                                          "priority out of range");
6676        if (attributes->transfer) {
6677                if (!priv->sh->config.dv_esw_en)
6678                        return rte_flow_error_set
6679                                (error, ENOTSUP,
6680                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6681                                 "E-Switch dr is not supported");
6682                if (attributes->egress)
6683                        return rte_flow_error_set
6684                                (error, ENOTSUP,
6685                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6686                                 "egress is not supported");
6687        }
6688        if (!(attributes->egress ^ attributes->ingress))
6689                return rte_flow_error_set(error, ENOTSUP,
6690                                          RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6691                                          "must specify exactly one of "
6692                                          "ingress or egress");
6693        return ret;
6694}
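
/*
 * Attributes accepted by the validation above, sketched: exactly one of
 * ingress/egress must be set, transfer excludes egress, and a non-zero
 * group selects a non-root table when HAVE_MLX5DV_DR is available.
 */
#if 0 /* illustrative sketch, not compiled */
	const struct rte_flow_attr attr = {
		.group = 1,	/* non-root table */
		.priority = 0,	/* must not exceed the lowest priority */
		.ingress = 1,	/* exactly one of ingress/egress */
	};
#endif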
6695
6696static int
6697validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6698                        int64_t pattern_flags, uint64_t l3_flags,
6699                        uint64_t l4_flags, uint64_t ip4_flag,
6700                        struct rte_flow_error *error)
6701{
6702        if (mask->l3_ok && !(pattern_flags & l3_flags))
6703                return rte_flow_error_set(error, EINVAL,
6704                                          RTE_FLOW_ERROR_TYPE_ITEM,
6705                                          NULL, "missing L3 protocol");
6706
6707        if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6708                return rte_flow_error_set(error, EINVAL,
6709                                          RTE_FLOW_ERROR_TYPE_ITEM,
6710                                          NULL, "missing IPv4 protocol");
6711
6712        if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6713                return rte_flow_error_set(error, EINVAL,
6714                                          RTE_FLOW_ERROR_TYPE_ITEM,
6715                                          NULL, "missing L4 protocol");
6716
6717        return 0;
6718}
6719
6720static int
6721flow_dv_validate_item_integrity_post(const struct
6722                                     rte_flow_item *integrity_items[2],
6723                                     int64_t pattern_flags,
6724                                     struct rte_flow_error *error)
6725{
6726        const struct rte_flow_item_integrity *mask;
6727        int ret;
6728
6729        if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6730                mask = (typeof(mask))integrity_items[0]->mask;
6731                ret = validate_integrity_bits(mask, pattern_flags,
6732                                              MLX5_FLOW_LAYER_OUTER_L3,
6733                                              MLX5_FLOW_LAYER_OUTER_L4,
6734                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6735                                              error);
6736                if (ret)
6737                        return ret;
6738        }
6739        if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6740                mask = (typeof(mask))integrity_items[1]->mask;
6741                ret = validate_integrity_bits(mask, pattern_flags,
6742                                              MLX5_FLOW_LAYER_INNER_L3,
6743                                              MLX5_FLOW_LAYER_INNER_L4,
6744                                              MLX5_FLOW_LAYER_INNER_L3_IPV4,
6745                                              error);
6746                if (ret)
6747                        return ret;
6748        }
6749        return 0;
6750}
6751
6752static int
6753flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6754                                const struct rte_flow_item *integrity_item,
6755                                uint64_t pattern_flags, uint64_t *last_item,
6756                                const struct rte_flow_item *integrity_items[2],
6757                                struct rte_flow_error *error)
6758{
6759        struct mlx5_priv *priv = dev->data->dev_private;
6760        const struct rte_flow_item_integrity *mask = (typeof(mask))
6761                                                     integrity_item->mask;
6762        const struct rte_flow_item_integrity *spec = (typeof(spec))
6763                                                     integrity_item->spec;
6764
6765        if (!priv->sh->cdev->config.hca_attr.pkt_integrity_match)
6766                return rte_flow_error_set(error, ENOTSUP,
6767                                          RTE_FLOW_ERROR_TYPE_ITEM,
6768                                          integrity_item,
					  "packet integrity item not supported");
6770        if (!spec)
6771                return rte_flow_error_set(error, ENOTSUP,
6772                                          RTE_FLOW_ERROR_TYPE_ITEM,
6773                                          integrity_item,
6774                                          "no spec for integrity item");
6775        if (!mask)
6776                mask = &rte_flow_item_integrity_mask;
6777        if (!mlx5_validate_integrity_item(mask))
6778                return rte_flow_error_set(error, ENOTSUP,
6779                                          RTE_FLOW_ERROR_TYPE_ITEM,
6780                                          integrity_item,
6781                                          "unsupported integrity filter");
6782        if ((mask->l3_ok & !spec->l3_ok) || (mask->l4_ok & !spec->l4_ok) ||
6783                (mask->ipv4_csum_ok & !spec->ipv4_csum_ok) ||
6784                (mask->l4_csum_ok & !spec->l4_csum_ok))
6785                return rte_flow_error_set(error, EINVAL,
6786                                          RTE_FLOW_ERROR_TYPE_ITEM,
6787                                          NULL, "negative integrity flow is not supported");
6788        if (spec->level > 1) {
6789                if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6790                        return rte_flow_error_set
6791                                (error, ENOTSUP,
6792                                 RTE_FLOW_ERROR_TYPE_ITEM,
6793                                 NULL, "multiple inner integrity items not supported");
6794                integrity_items[1] = integrity_item;
6795                *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6796        } else {
6797                if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6798                        return rte_flow_error_set
6799                                (error, ENOTSUP,
6800                                 RTE_FLOW_ERROR_TYPE_ITEM,
6801                                 NULL, "multiple outer integrity items not supported");
6802                integrity_items[0] = integrity_item;
6803                *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6804        }
6805        return 0;
6806}
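
/*
 * A pattern sketch that satisfies the checks above: the spec asserts only
 * positive bits (negative matching is rejected), level <= 1 selects the
 * outer part, and the pattern carries the L3/L4 items the asserted bits
 * refer to, as flow_dv_validate_item_integrity_post() later verifies.
 */
#if 0 /* illustrative sketch, not compiled */
	const struct rte_flow_item_integrity integ = {
		.level = 0,
		.l3_ok = 1,
		.l4_ok = 1,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
		  .spec = &integ, .mask = &integ },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
#endif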
6807
6808static int
6809flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6810                           const struct rte_flow_item *item,
6811                           uint64_t item_flags,
6812                           uint64_t *last_item,
6813                           bool is_inner,
6814                           struct rte_flow_error *error)
6815{
6816        const struct rte_flow_item_flex *flow_spec = item->spec;
6817        const struct rte_flow_item_flex *flow_mask = item->mask;
6818        struct mlx5_flex_item *flex;
6819
6820        if (!flow_spec)
6821                return rte_flow_error_set(error, EINVAL,
6822                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6823                                          "flex flow item spec cannot be NULL");
6824        if (!flow_mask)
6825                return rte_flow_error_set(error, EINVAL,
6826                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6827                                          "flex flow item mask cannot be NULL");
6828        if (item->last)
6829                return rte_flow_error_set(error, ENOTSUP,
6830                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6831                                          "flex flow item last not supported");
6832        if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6833                return rte_flow_error_set(error, EINVAL,
6834                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6835                                          "invalid flex flow item handle");
6836        flex = (struct mlx5_flex_item *)flow_spec->handle;
	switch (flex->tunnel_mode) {
	case FLEX_TUNNEL_MODE_SINGLE:
		if (item_flags &
		    (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
						  "multiple flex items not supported");
		break;
	case FLEX_TUNNEL_MODE_OUTER:
		if (is_inner)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
						  "inner flex item was not configured");
		if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
						  "multiple flex items not supported");
		break;
	case FLEX_TUNNEL_MODE_INNER:
		if (!is_inner)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
						  "outer flex item was not configured");
		if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
						  "multiple flex items not supported");
		break;
	case FLEX_TUNNEL_MODE_MULTI:
		if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
		    (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
						  "multiple flex items not supported");
		break;
	case FLEX_TUNNEL_MODE_TUNNEL:
		if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
						  "multiple flex tunnel items not supported");
		break;
	default:
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "invalid flex item configuration");
	}
6884        *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6885                     MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6886                     MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6887        return 0;
6888}
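
/*
 * Flex item placement rules enforced by the switch above, summarized per
 * the tunnel_mode configured on the flex item handle:
 *   SINGLE - at most one flex item in the whole pattern;
 *   OUTER  - outer part only, at most one;
 *   INNER  - inner part only, at most one;
 *   MULTI  - one outer and one inner flex item may coexist;
 *   TUNNEL - the flex item itself acts as the tunnel header, so it must
 *            not appear in the inner part nor follow another flex tunnel
 *            item.
 */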
6889
6890/**
6891 * Internal validation function. For validating both actions and items.
6892 *
6893 * @param[in] dev
6894 *   Pointer to the rte_eth_dev structure.
6895 * @param[in] attr
6896 *   Pointer to the flow attributes.
6897 * @param[in] items
6898 *   Pointer to the list of items.
6899 * @param[in] actions
6900 *   Pointer to the list of actions.
6901 * @param[in] external
 *   True if this flow rule was created by a request external to the PMD.
6903 * @param[in] hairpin
6904 *   Number of hairpin TX actions, 0 means classic flow.
6905 * @param[out] error
6906 *   Pointer to the error structure.
6907 *
6908 * @return
6909 *   0 on success, a negative errno value otherwise and rte_errno is set.
6910 */
6911static int
6912flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6913                 const struct rte_flow_item items[],
6914                 const struct rte_flow_action actions[],
6915                 bool external, int hairpin, struct rte_flow_error *error)
6916{
6917        int ret;
6918        uint64_t aso_mask, action_flags = 0;
6919        uint64_t item_flags = 0;
6920        uint64_t last_item = 0;
6921        uint8_t next_protocol = 0xff;
6922        uint16_t ether_type = 0;
6923        int actions_n = 0;
6924        uint8_t item_ipv6_proto = 0;
6925        int fdb_mirror_limit = 0;
6926        int modify_after_mirror = 0;
6927        const struct rte_flow_item *geneve_item = NULL;
6928        const struct rte_flow_item *gre_item = NULL;
6929        const struct rte_flow_item *gtp_item = NULL;
6930        const struct rte_flow_action_raw_decap *decap;
6931        const struct rte_flow_action_raw_encap *encap;
6932        const struct rte_flow_action_rss *rss = NULL;
6933        const struct rte_flow_action_rss *sample_rss = NULL;
6934        const struct rte_flow_action_count *sample_count = NULL;
6935        const struct rte_flow_item_tcp nic_tcp_mask = {
6936                .hdr = {
6937                        .tcp_flags = 0xFF,
6938                        .src_port = RTE_BE16(UINT16_MAX),
6939                        .dst_port = RTE_BE16(UINT16_MAX),
6940                }
6941        };
6942        const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6943                .hdr = {
6944                        .src_addr =
6945                        "\xff\xff\xff\xff\xff\xff\xff\xff"
6946                        "\xff\xff\xff\xff\xff\xff\xff\xff",
6947                        .dst_addr =
6948                        "\xff\xff\xff\xff\xff\xff\xff\xff"
6949                        "\xff\xff\xff\xff\xff\xff\xff\xff",
6950                        .vtc_flow = RTE_BE32(0xffffffff),
6951                        .proto = 0xff,
6952                        .hop_limits = 0xff,
6953                },
6954                .has_frag_ext = 1,
6955        };
6956        const struct rte_flow_item_ecpri nic_ecpri_mask = {
6957                .hdr = {
6958                        .common = {
6959                                .u32 =
6960                                RTE_BE32(((const struct rte_ecpri_common_hdr) {
6961                                        .type = 0xFF,
6962                                        }).u32),
6963                        },
6964                        .dummy[0] = 0xffffffff,
6965                },
6966        };
6967        struct mlx5_priv *priv = dev->data->dev_private;
6968        struct mlx5_sh_config *dev_conf = &priv->sh->config;
6969        uint16_t queue_index = 0xFFFF;
6970        const struct rte_flow_item_vlan *vlan_m = NULL;
6971        uint32_t rw_act_num = 0;
6972        uint64_t is_root;
6973        const struct mlx5_flow_tunnel *tunnel;
6974        enum mlx5_tof_rule_type tof_rule_type;
6975        struct flow_grp_info grp_info = {
6976                .external = !!external,
6977                .transfer = !!attr->transfer,
6978                .fdb_def_rule = !!priv->fdb_def_rule,
6979                .std_tbl_fix = true,
6980        };
6981        const struct rte_eth_hairpin_conf *conf;
6982        const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6983        const struct rte_flow_item *port_id_item = NULL;
6984        bool def_policy = false;
6985        bool shared_count = false;
6986        uint16_t udp_dport = 0;
6987        uint32_t tag_id = 0;
6988        const struct rte_flow_action_age *non_shared_age = NULL;
6989        const struct rte_flow_action_count *count = NULL;
6990
6991        if (items == NULL)
6992                return -1;
6993        tunnel = is_tunnel_offload_active(dev) ?
6994                 mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6995        if (tunnel) {
6996                if (!dev_conf->dv_flow_en)
6997                        return rte_flow_error_set
6998                                (error, ENOTSUP,
6999                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7000                                 NULL, "tunnel offload requires DV flow interface");
7001                if (priv->representor)
7002                        return rte_flow_error_set
7003                                (error, ENOTSUP,
7004                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7005                                 NULL, "decap not supported for VF representor");
7006                if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
7007                        action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
7008                else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
7009                        action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
7010                                        MLX5_FLOW_ACTION_DECAP;
7011                grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
7012                                        (dev, attr, tunnel, tof_rule_type);
7013        }
7014        ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
7015        if (ret < 0)
7016                return ret;
7017        is_root = (uint64_t)ret;
7018        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
7019                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
7020                int type = items->type;
7021
7022                if (!mlx5_flow_os_item_supported(type))
7023                        return rte_flow_error_set(error, ENOTSUP,
7024                                                  RTE_FLOW_ERROR_TYPE_ITEM,
7025                                                  NULL, "item not supported");
7026                switch (type) {
7027                case RTE_FLOW_ITEM_TYPE_VOID:
7028                        break;
7029                case RTE_FLOW_ITEM_TYPE_ESP:
7030                        ret = mlx5_flow_os_validate_item_esp(items, item_flags,
7031                                                          next_protocol,
7032                                                          error);
7033                        if (ret < 0)
7034                                return ret;
7035                        last_item = MLX5_FLOW_ITEM_ESP;
7036                        break;
7037                case RTE_FLOW_ITEM_TYPE_PORT_ID:
7038                        ret = flow_dv_validate_item_port_id
7039                                        (dev, items, attr, item_flags, error);
7040                        if (ret < 0)
7041                                return ret;
7042                        last_item = MLX5_FLOW_ITEM_PORT_ID;
7043                        port_id_item = items;
7044                        break;
7045                case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
7046                        ret = flow_dv_validate_item_represented_port
7047                                        (dev, items, attr, item_flags, error);
7048                        if (ret < 0)
7049                                return ret;
7050                        last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
7051                        break;
7052                case RTE_FLOW_ITEM_TYPE_ETH:
7053                        ret = mlx5_flow_validate_item_eth(items, item_flags,
7054                                                          true, error);
7055                        if (ret < 0)
7056                                return ret;
7057                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
7058                                             MLX5_FLOW_LAYER_OUTER_L2;
7059                        if (items->mask != NULL && items->spec != NULL) {
7060                                ether_type =
7061                                        ((const struct rte_flow_item_eth *)
7062                                         items->spec)->type;
7063                                ether_type &=
7064                                        ((const struct rte_flow_item_eth *)
7065                                         items->mask)->type;
7066                                ether_type = rte_be_to_cpu_16(ether_type);
7067                        } else {
7068                                ether_type = 0;
7069                        }
7070                        break;
7071                case RTE_FLOW_ITEM_TYPE_VLAN:
7072                        ret = flow_dv_validate_item_vlan(items, item_flags,
7073                                                         dev, error);
7074                        if (ret < 0)
7075                                return ret;
7076                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
7077                                             MLX5_FLOW_LAYER_OUTER_VLAN;
7078                        if (items->mask != NULL && items->spec != NULL) {
7079                                ether_type =
7080                                        ((const struct rte_flow_item_vlan *)
7081                                         items->spec)->inner_type;
7082                                ether_type &=
7083                                        ((const struct rte_flow_item_vlan *)
7084                                         items->mask)->inner_type;
7085                                ether_type = rte_be_to_cpu_16(ether_type);
7086                        } else {
7087                                ether_type = 0;
7088                        }
7089                        /* Store outer VLAN mask for of_push_vlan action. */
7090                        if (!tunnel)
7091                                vlan_m = items->mask;
7092                        break;
7093                case RTE_FLOW_ITEM_TYPE_IPV4:
7094                        mlx5_flow_tunnel_ip_check(items, next_protocol,
7095                                                  &item_flags, &tunnel);
7096                        ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
7097                                                         last_item, ether_type,
7098                                                         error);
7099                        if (ret < 0)
7100                                return ret;
7101                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7102                                             MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7103                        if (items->mask != NULL &&
7104                            ((const struct rte_flow_item_ipv4 *)
7105                             items->mask)->hdr.next_proto_id) {
7106                                next_protocol =
7107                                        ((const struct rte_flow_item_ipv4 *)
7108                                         (items->spec))->hdr.next_proto_id;
7109                                next_protocol &=
7110                                        ((const struct rte_flow_item_ipv4 *)
7111                                         (items->mask))->hdr.next_proto_id;
7112                        } else {
7113                                /* Reset for inner layer. */
7114                                next_protocol = 0xff;
7115                        }
7116                        break;
7117                case RTE_FLOW_ITEM_TYPE_IPV6:
7118                        mlx5_flow_tunnel_ip_check(items, next_protocol,
7119                                                  &item_flags, &tunnel);
7120                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7121                                                           last_item,
7122                                                           ether_type,
7123                                                           &nic_ipv6_mask,
7124                                                           error);
7125                        if (ret < 0)
7126                                return ret;
7127                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7128                                             MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7129                        if (items->mask != NULL &&
7130                            ((const struct rte_flow_item_ipv6 *)
7131                             items->mask)->hdr.proto) {
7132                                item_ipv6_proto =
7133                                        ((const struct rte_flow_item_ipv6 *)
7134                                         items->spec)->hdr.proto;
7135                                next_protocol =
7136                                        ((const struct rte_flow_item_ipv6 *)
7137                                         items->spec)->hdr.proto;
7138                                next_protocol &=
7139                                        ((const struct rte_flow_item_ipv6 *)
7140                                         items->mask)->hdr.proto;
7141                        } else {
7142                                /* Reset for inner layer. */
7143                                next_protocol = 0xff;
7144                        }
7145                        break;
7146                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7147                        ret = flow_dv_validate_item_ipv6_frag_ext(items,
7148                                                                  item_flags,
7149                                                                  error);
7150                        if (ret < 0)
7151                                return ret;
7152                        last_item = tunnel ?
7153                                        MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7154                                        MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7155                        if (items->mask != NULL &&
7156                            ((const struct rte_flow_item_ipv6_frag_ext *)
7157                             items->mask)->hdr.next_header) {
7158                                next_protocol =
7159                                ((const struct rte_flow_item_ipv6_frag_ext *)
7160                                 items->spec)->hdr.next_header;
7161                                next_protocol &=
7162                                ((const struct rte_flow_item_ipv6_frag_ext *)
7163                                 items->mask)->hdr.next_header;
7164                        } else {
7165                                /* Reset for inner layer. */
7166                                next_protocol = 0xff;
7167                        }
7168                        break;
7169                case RTE_FLOW_ITEM_TYPE_TCP:
7170                        ret = mlx5_flow_validate_item_tcp
7171                                                (items, item_flags,
7172                                                 next_protocol,
7173                                                 &nic_tcp_mask,
7174                                                 error);
7175                        if (ret < 0)
7176                                return ret;
7177                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7178                                             MLX5_FLOW_LAYER_OUTER_L4_TCP;
7179                        break;
		case RTE_FLOW_ITEM_TYPE_UDP: {
			const struct rte_flow_item_udp *spec = items->spec;
			const struct rte_flow_item_udp *mask = items->mask;

			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			if (!mask)
				mask = &rte_flow_item_udp_mask;
			if (spec != NULL)
				udp_dport = rte_be_to_cpu_16
						(spec->hdr.dst_port &
						 mask->hdr.dst_port);
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		}
7197                case RTE_FLOW_ITEM_TYPE_GRE:
7198                        ret = mlx5_flow_validate_item_gre(items, item_flags,
7199                                                          next_protocol, error);
7200                        if (ret < 0)
7201                                return ret;
7202                        gre_item = items;
7203                        last_item = MLX5_FLOW_LAYER_GRE;
7204                        break;
7205                case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
7206                        ret = mlx5_flow_validate_item_gre_option(dev, items, item_flags,
7207                                                          attr, gre_item, error);
7208                        if (ret < 0)
7209                                return ret;
7210                        last_item = MLX5_FLOW_LAYER_GRE;
7211                        break;
7212                case RTE_FLOW_ITEM_TYPE_NVGRE:
7213                        ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7214                                                            next_protocol,
7215                                                            error);
7216                        if (ret < 0)
7217                                return ret;
7218                        last_item = MLX5_FLOW_LAYER_NVGRE;
7219                        break;
7220                case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7221                        ret = mlx5_flow_validate_item_gre_key
7222                                (items, item_flags, gre_item, error);
7223                        if (ret < 0)
7224                                return ret;
7225                        last_item = MLX5_FLOW_LAYER_GRE_KEY;
7226                        break;
7227                case RTE_FLOW_ITEM_TYPE_VXLAN:
7228                        ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7229                                                            items, item_flags,
7230                                                            attr, error);
7231                        if (ret < 0)
7232                                return ret;
7233                        last_item = MLX5_FLOW_LAYER_VXLAN;
7234                        break;
7235                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7236                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
7237                                                                item_flags, dev,
7238                                                                error);
7239                        if (ret < 0)
7240                                return ret;
7241                        last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7242                        break;
7243                case RTE_FLOW_ITEM_TYPE_GENEVE:
7244                        ret = mlx5_flow_validate_item_geneve(items,
7245                                                             item_flags, dev,
7246                                                             error);
7247                        if (ret < 0)
7248                                return ret;
7249                        geneve_item = items;
7250                        last_item = MLX5_FLOW_LAYER_GENEVE;
7251                        break;
7252                case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7253                        ret = mlx5_flow_validate_item_geneve_opt(items,
7254                                                                 last_item,
7255                                                                 geneve_item,
7256                                                                 dev,
7257                                                                 error);
7258                        if (ret < 0)
7259                                return ret;
7260                        last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7261                        break;
7262                case RTE_FLOW_ITEM_TYPE_MPLS:
7263                        ret = mlx5_flow_validate_item_mpls(dev, items,
7264                                                           item_flags,
7265                                                           last_item, error);
7266                        if (ret < 0)
7267                                return ret;
7268                        last_item = MLX5_FLOW_LAYER_MPLS;
7269                        break;
7270
7271                case RTE_FLOW_ITEM_TYPE_MARK:
7272                        ret = flow_dv_validate_item_mark(dev, items, attr,
7273                                                         error);
7274                        if (ret < 0)
7275                                return ret;
7276                        last_item = MLX5_FLOW_ITEM_MARK;
7277                        break;
7278                case RTE_FLOW_ITEM_TYPE_META:
7279                        ret = flow_dv_validate_item_meta(dev, items, attr,
7280                                                         error);
7281                        if (ret < 0)
7282                                return ret;
7283                        last_item = MLX5_FLOW_ITEM_METADATA;
7284                        break;
7285                case RTE_FLOW_ITEM_TYPE_ICMP:
7286                        ret = mlx5_flow_validate_item_icmp(items, item_flags,
7287                                                           next_protocol,
7288                                                           error);
7289                        if (ret < 0)
7290                                return ret;
7291                        last_item = MLX5_FLOW_LAYER_ICMP;
7292                        break;
7293                case RTE_FLOW_ITEM_TYPE_ICMP6:
7294                        ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7295                                                            next_protocol,
7296                                                            error);
7297                        if (ret < 0)
7298                                return ret;
7299                        item_ipv6_proto = IPPROTO_ICMPV6;
7300                        last_item = MLX5_FLOW_LAYER_ICMP6;
7301                        break;
7302                case RTE_FLOW_ITEM_TYPE_TAG:
7303                        ret = flow_dv_validate_item_tag(dev, items,
7304                                                        attr, error);
7305                        if (ret < 0)
7306                                return ret;
7307                        last_item = MLX5_FLOW_ITEM_TAG;
7308                        break;
7309                case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7310                        last_item = MLX5_FLOW_ITEM_TX_QUEUE;
7311                        break;
7312                case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7313                        break;
7314                case RTE_FLOW_ITEM_TYPE_GTP:
7315                        ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7316                                                        error);
7317                        if (ret < 0)
7318                                return ret;
7319                        gtp_item = items;
7320                        last_item = MLX5_FLOW_LAYER_GTP;
7321                        break;
7322                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7323                        ret = flow_dv_validate_item_gtp_psc(items, last_item,
7324                                                            gtp_item, attr,
7325                                                            error);
7326                        if (ret < 0)
7327                                return ret;
7328                        last_item = MLX5_FLOW_LAYER_GTP_PSC;
7329                        break;
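                    /*
                     * Note: GENEVE_OPT and GTP_PSC validation needs the spec
                     * of the preceding GENEVE/GTP item, which is why those
                     * items are remembered in geneve_item/gtp_item above.
                     */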
7330                case RTE_FLOW_ITEM_TYPE_ECPRI:
7331                        /* Capacity will be checked in the translate stage. */
7332                        ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7333                                                            last_item,
7334                                                            ether_type,
7335                                                            &nic_ecpri_mask,
7336                                                            error);
7337                        if (ret < 0)
7338                                return ret;
7339                        last_item = MLX5_FLOW_LAYER_ECPRI;
7340                        break;
7341                case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7342                        ret = flow_dv_validate_item_integrity(dev, items,
7343                                                              item_flags,
7344                                                              &last_item,
7345                                                              integrity_items,
7346                                                              error);
7347                        if (ret < 0)
7348                                return ret;
7349                        break;
7350                case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7351                        ret = flow_dv_validate_item_aso_ct(dev, items,
7352                                                           &item_flags, error);
7353                        if (ret < 0)
7354                                return ret;
7355                        break;
7356                case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7357                        /* tunnel offload item was processed before;
7358                         * list it here as a supported type.
7359                         */
7360                        break;
7361                case RTE_FLOW_ITEM_TYPE_FLEX:
7362                        ret = flow_dv_validate_item_flex(dev, items, item_flags,
7363                                                         &last_item,
7364                                                         tunnel != 0, error);
7365                        if (ret < 0)
7366                                return ret;
7367                        break;
7368                default:
7369                        return rte_flow_error_set(error, ENOTSUP,
7370                                                  RTE_FLOW_ERROR_TYPE_ITEM,
7371                                                  NULL, "item not supported");
7372                }
7373                item_flags |= last_item;
7374        }
7375        if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7376                ret = flow_dv_validate_item_integrity_post(integrity_items,
7377                                                           item_flags, error);
7378                if (ret)
7379                        return ret;
7380        }
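            /*
             * All pattern items have been parsed at this point; item_flags
             * carries the accumulated layer mask that the action validation
             * below checks against.
             */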
7381        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7382                int type = actions->type;
7383
7384                if (!mlx5_flow_os_action_supported(type))
7385                        return rte_flow_error_set(error, ENOTSUP,
7386                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7387                                                  actions,
7388                                                  "action not supported");
7389                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7390                        return rte_flow_error_set(error, ENOTSUP,
7391                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7392                                                  actions, "too many actions");
7393                if (action_flags &
7394                        MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7395                        return rte_flow_error_set(error, ENOTSUP,
7396                                RTE_FLOW_ERROR_TYPE_ACTION,
7397                                NULL, "meter action with policy "
7398                                "must be the last action");
7399                switch (type) {
7400                case RTE_FLOW_ACTION_TYPE_VOID:
7401                        break;
7402                case RTE_FLOW_ACTION_TYPE_PORT_ID:
7403                case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7404                        ret = flow_dv_validate_action_port_id(dev,
7405                                                              action_flags,
7406                                                              actions,
7407                                                              attr,
7408                                                              error);
7409                        if (ret)
7410                                return ret;
7411                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7412                        ++actions_n;
7413                        break;
7414                case RTE_FLOW_ACTION_TYPE_FLAG:
7415                        ret = flow_dv_validate_action_flag(dev, action_flags,
7416                                                           attr, error);
7417                        if (ret < 0)
7418                                return ret;
7419                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7420                                /* Count all modify-header actions as one. */
7421                                if (!(action_flags &
7422                                      MLX5_FLOW_MODIFY_HDR_ACTIONS))
7423                                        ++actions_n;
7424                                action_flags |= MLX5_FLOW_ACTION_FLAG |
7425                                                MLX5_FLOW_ACTION_MARK_EXT;
7426                                if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7427                                        modify_after_mirror = 1;
7429                        } else {
7430                                action_flags |= MLX5_FLOW_ACTION_FLAG;
7431                                ++actions_n;
7432                        }
7433                        rw_act_num += MLX5_ACT_NUM_SET_MARK;
7434                        break;
7435                case RTE_FLOW_ACTION_TYPE_MARK:
7436                        ret = flow_dv_validate_action_mark(dev, actions,
7437                                                           action_flags,
7438                                                           attr, error);
7439                        if (ret < 0)
7440                                return ret;
7441                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7442                                /* Count all modify-header actions as one. */
7443                                if (!(action_flags &
7444                                      MLX5_FLOW_MODIFY_HDR_ACTIONS))
7445                                        ++actions_n;
7446                                action_flags |= MLX5_FLOW_ACTION_MARK |
7447                                                MLX5_FLOW_ACTION_MARK_EXT;
7448                                if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7449                                        modify_after_mirror = 1;
7450                        } else {
7451                                action_flags |= MLX5_FLOW_ACTION_MARK;
7452                                ++actions_n;
7453                        }
7454                        rw_act_num += MLX5_ACT_NUM_SET_MARK;
7455                        break;
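                    /*
                     * In extended metadata mode (dv_xmeta_en != LEGACY), the
                     * FLAG and MARK actions above are implemented through a
                     * modify-header register write, so they share the single
                     * counted modify-header action with any other
                     * modify-header actions in the rule.
                     */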
7456                case RTE_FLOW_ACTION_TYPE_SET_META:
7457                        ret = flow_dv_validate_action_set_meta(dev, actions,
7458                                                               action_flags,
7459                                                               attr, error);
7460                        if (ret < 0)
7461                                return ret;
7462                        /* Count all modify-header actions as one action. */
7463                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7464                                ++actions_n;
7465                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7466                                modify_after_mirror = 1;
7467                        action_flags |= MLX5_FLOW_ACTION_SET_META;
7468                        rw_act_num += MLX5_ACT_NUM_SET_META;
7469                        break;
7470                case RTE_FLOW_ACTION_TYPE_SET_TAG:
7471                        ret = flow_dv_validate_action_set_tag(dev, actions,
7472                                                              action_flags,
7473                                                              attr, error);
7474                        if (ret < 0)
7475                                return ret;
7476                        /* Count all modify-header actions as one action. */
7477                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7478                                ++actions_n;
7479                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7480                                modify_after_mirror = 1;
7481                        tag_id = ((const struct rte_flow_action_set_tag *)
7482                                  actions->conf)->index;
7483                        action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7484                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
7485                        break;
7486                case RTE_FLOW_ACTION_TYPE_DROP:
7487                        ret = mlx5_flow_validate_action_drop(action_flags,
7488                                                             attr, error);
7489                        if (ret < 0)
7490                                return ret;
7491                        action_flags |= MLX5_FLOW_ACTION_DROP;
7492                        ++actions_n;
7493                        break;
7494                case RTE_FLOW_ACTION_TYPE_QUEUE:
7495                        ret = mlx5_flow_validate_action_queue(actions,
7496                                                              action_flags, dev,
7497                                                              attr, error);
7498                        if (ret < 0)
7499                                return ret;
7500                        queue_index = ((const struct rte_flow_action_queue *)
7501                                                        (actions->conf))->index;
7502                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
7503                        ++actions_n;
7504                        break;
7505                case RTE_FLOW_ACTION_TYPE_RSS:
7506                        rss = actions->conf;
7507                        ret = mlx5_flow_validate_action_rss(actions,
7508                                                            action_flags, dev,
7509                                                            attr, item_flags,
7510                                                            error);
7511                        if (ret < 0)
7512                                return ret;
7513                        if (rss && sample_rss &&
7514                            (sample_rss->level != rss->level ||
7515                            sample_rss->types != rss->types))
7516                                return rte_flow_error_set(error, ENOTSUP,
7517                                        RTE_FLOW_ERROR_TYPE_ACTION,
7518                                        NULL,
7519                                        "Cannot use different RSS types "
7520                                        "or levels in the same flow");
7521                        if (rss != NULL && rss->queue_num)
7522                                queue_index = rss->queue[0];
7523                        action_flags |= MLX5_FLOW_ACTION_RSS;
7524                        ++actions_n;
7525                        break;
7526                case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7527                        ret =
7528                        mlx5_flow_validate_action_default_miss(action_flags,
7529                                        attr, error);
7530                        if (ret < 0)
7531                                return ret;
7532                        action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7533                        ++actions_n;
7534                        break;
7535                case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7536                        shared_count = true;
7537                        /* fall-through. */
7538                case RTE_FLOW_ACTION_TYPE_COUNT:
7539                        ret = flow_dv_validate_action_count(dev, shared_count,
7540                                                            action_flags,
7541                                                            attr, error);
7542                        if (ret < 0)
7543                                return ret;
7544                        count = actions->conf;
7545                        action_flags |= MLX5_FLOW_ACTION_COUNT;
7546                        ++actions_n;
7547                        break;
7548                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7549                        if (flow_dv_validate_action_pop_vlan(dev,
7550                                                             action_flags,
7551                                                             actions,
7552                                                             item_flags, attr,
7553                                                             error))
7554                                return -rte_errno;
7555                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7556                                modify_after_mirror = 1;
7557                        action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7558                        ++actions_n;
7559                        break;
7560                case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7561                        ret = flow_dv_validate_action_push_vlan(dev,
7562                                                                action_flags,
7563                                                                vlan_m,
7564                                                                actions, attr,
7565                                                                error);
7566                        if (ret < 0)
7567                                return ret;
7568                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7569                                modify_after_mirror = 1;
7570                        action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7571                        ++actions_n;
7572                        break;
7573                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7574                        ret = flow_dv_validate_action_set_vlan_pcp
7575                                                (action_flags, actions, error);
7576                        if (ret < 0)
7577                                return ret;
7578                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7579                                modify_after_mirror = 1;
7580                        /* Count PCP with push_vlan command. */
7581                        action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7582                        break;
7583                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7584                        ret = flow_dv_validate_action_set_vlan_vid
7585                                                (item_flags, action_flags,
7586                                                 actions, error);
7587                        if (ret < 0)
7588                                return ret;
7589                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7590                                modify_after_mirror = 1;
7591                        /* Count VID with push_vlan command. */
7592                        action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7593                        rw_act_num += MLX5_ACT_NUM_MDF_VID;
7594                        break;
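                    /*
                     * As the comments above note, SET_VLAN_PCP and
                     * SET_VLAN_VID do not consume an action slot of their
                     * own: both are counted together with the push_vlan
                     * command, and VID additionally takes one modify-header
                     * credit (MLX5_ACT_NUM_MDF_VID).
                     */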
7595                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7596                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7597                        ret = flow_dv_validate_action_l2_encap(dev,
7598                                                               action_flags,
7599                                                               actions, attr,
7600                                                               error);
7601                        if (ret < 0)
7602                                return ret;
7603                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
7604                        ++actions_n;
7605                        break;
7606                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7607                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7608                        ret = flow_dv_validate_action_decap(dev, action_flags,
7609                                                            actions, item_flags,
7610                                                            attr, error);
7611                        if (ret < 0)
7612                                return ret;
7613                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7614                                modify_after_mirror = 1;
7615                        action_flags |= MLX5_FLOW_ACTION_DECAP;
7616                        ++actions_n;
7617                        break;
7618                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7619                        ret = flow_dv_validate_action_raw_encap_decap
7620                                (dev, NULL, actions->conf, attr, &action_flags,
7621                                 &actions_n, actions, item_flags, error);
7622                        if (ret < 0)
7623                                return ret;
7624                        break;
7625                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7626                        decap = actions->conf;
7627                        while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7628                                ;
7629                        if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7630                                encap = NULL;
7631                                actions--;
7632                        } else {
7633                                encap = actions->conf;
7634                        }
7635                        ret = flow_dv_validate_action_raw_encap_decap
7636                                           (dev,
7637                                            decap ? decap : &empty_decap, encap,
7638                                            attr, &action_flags, &actions_n,
7639                                            actions, item_flags, error);
7640                        if (ret < 0)
7641                                return ret;
7642                        if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7643                            (action_flags & MLX5_FLOW_ACTION_DECAP))
7644                                modify_after_mirror = 1;
7645                        break;
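                    /*
                     * The RAW_DECAP case above peeks past any VOID actions to
                     * see whether a RAW_ENCAP immediately follows, so that a
                     * decap/encap pair is validated as a single header
                     * rewrite. When no RAW_ENCAP follows, the iterator is
                     * rewound and the for-loop increment revisits the peeked
                     * action.
                     */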
7646                case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7647                case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7648                        ret = flow_dv_validate_action_modify_mac(action_flags,
7649                                                                 actions,
7650                                                                 item_flags,
7651                                                                 error);
7652                        if (ret < 0)
7653                                return ret;
7654                        /* Count all modify-header actions as one action. */
7655                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7656                                ++actions_n;
7657                        action_flags |= actions->type ==
7658                                        RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7659                                                MLX5_FLOW_ACTION_SET_MAC_SRC :
7660                                                MLX5_FLOW_ACTION_SET_MAC_DST;
7661                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7662                                modify_after_mirror = 1;
7663                        /*
7664                         * Even if the source and destination MAC addresses
7665                         * overlap in the header with 4B alignment, the convert
7666                         * function handles them separately and 4 SW actions
7667                         * are created. 2 actions are added each time, no
7668                         * matter how many bytes of the address are set.
7669                         */
7670                        rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7671                        break;
7672                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7673                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7674                        ret = flow_dv_validate_action_modify_ipv4(action_flags,
7675                                                                  actions,
7676                                                                  item_flags,
7677                                                                  error);
7678                        if (ret < 0)
7679                                return ret;
7680                        /* Count all modify-header actions as one action. */
7681                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7682                                ++actions_n;
7683                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7684                                modify_after_mirror = 1;
7685                        action_flags |= actions->type ==
7686                                        RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7687                                                MLX5_FLOW_ACTION_SET_IPV4_SRC :
7688                                                MLX5_FLOW_ACTION_SET_IPV4_DST;
7689                        rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7690                        break;
7691                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7692                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7693                        ret = flow_dv_validate_action_modify_ipv6(action_flags,
7694                                                                  actions,
7695                                                                  item_flags,
7696                                                                  error);
7697                        if (ret < 0)
7698                                return ret;
7699                        if (item_ipv6_proto == IPPROTO_ICMPV6)
7700                                return rte_flow_error_set(error, ENOTSUP,
7701                                        RTE_FLOW_ERROR_TYPE_ACTION,
7702                                        actions,
7703                                        "Can't change header "
7704                                        "with ICMPv6 proto");
7705                        /* Count all modify-header actions as one action. */
7706                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7707                                ++actions_n;
7708                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7709                                modify_after_mirror = 1;
7710                        action_flags |= actions->type ==
7711                                        RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7712                                                MLX5_FLOW_ACTION_SET_IPV6_SRC :
7713                                                MLX5_FLOW_ACTION_SET_IPV6_DST;
7714                        rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7715                        break;
7716                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7717                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7718                        ret = flow_dv_validate_action_modify_tp(action_flags,
7719                                                                actions,
7720                                                                item_flags,
7721                                                                error);
7722                        if (ret < 0)
7723                                return ret;
7724                        /* Count all modify-header actions as one action. */
7725                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7726                                ++actions_n;
7727                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7728                                modify_after_mirror = 1;
7729                        action_flags |= actions->type ==
7730                                        RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7731                                                MLX5_FLOW_ACTION_SET_TP_SRC :
7732                                                MLX5_FLOW_ACTION_SET_TP_DST;
7733                        rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7734                        break;
7735                case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7736                case RTE_FLOW_ACTION_TYPE_SET_TTL:
7737                        ret = flow_dv_validate_action_modify_ttl(action_flags,
7738                                                                 actions,
7739                                                                 item_flags,
7740                                                                 error);
7741                        if (ret < 0)
7742                                return ret;
7743                        /* Count all modify-header actions as one action. */
7744                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7745                                ++actions_n;
7746                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7747                                modify_after_mirror = 1;
7748                        action_flags |= actions->type ==
7749                                        RTE_FLOW_ACTION_TYPE_SET_TTL ?
7750                                                MLX5_FLOW_ACTION_SET_TTL :
7751                                                MLX5_FLOW_ACTION_DEC_TTL;
7752                        rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7753                        break;
7754                case RTE_FLOW_ACTION_TYPE_JUMP:
7755                        ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7756                                                           action_flags,
7757                                                           attr, external,
7758                                                           error);
7759                        if (ret)
7760                                return ret;
7761                        if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7762                            fdb_mirror_limit)
7763                                return rte_flow_error_set(error, EINVAL,
7764                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7765                                                  NULL,
7766                                                  "sample and jump action combination is not supported");
7767                        ++actions_n;
7768                        action_flags |= MLX5_FLOW_ACTION_JUMP;
7769                        break;
7770                case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7771                case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7772                        ret = flow_dv_validate_action_modify_tcp_seq
7773                                                                (action_flags,
7774                                                                 actions,
7775                                                                 item_flags,
7776                                                                 error);
7777                        if (ret < 0)
7778                                return ret;
7779                        /* Count all modify-header actions as one action. */
7780                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7781                                ++actions_n;
7782                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7783                                modify_after_mirror = 1;
7784                        action_flags |= actions->type ==
7785                                        RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7786                                                MLX5_FLOW_ACTION_INC_TCP_SEQ :
7787                                                MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7788                        rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7789                        break;
7790                case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7791                case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7792                        ret = flow_dv_validate_action_modify_tcp_ack
7793                                                                (action_flags,
7794                                                                 actions,
7795                                                                 item_flags,
7796                                                                 error);
7797                        if (ret < 0)
7798                                return ret;
7799                        /* Count all modify-header actions as one action. */
7800                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7801                                ++actions_n;
7802                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7803                                modify_after_mirror = 1;
7804                        action_flags |= actions->type ==
7805                                        RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7806                                                MLX5_FLOW_ACTION_INC_TCP_ACK :
7807                                                MLX5_FLOW_ACTION_DEC_TCP_ACK;
7808                        rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7809                        break;
7810                case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7811                        break;
7812                case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7813                case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7814                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
7815                        break;
7816                case RTE_FLOW_ACTION_TYPE_METER:
7817                        ret = mlx5_flow_validate_action_meter(dev,
7818                                                              action_flags,
7819                                                              item_flags,
7820                                                              actions, attr,
7821                                                              port_id_item,
7822                                                              &def_policy,
7823                                                              error);
7824                        if (ret < 0)
7825                                return ret;
7826                        action_flags |= MLX5_FLOW_ACTION_METER;
7827                        if (!def_policy)
7828                                action_flags |=
7829                                MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7830                        ++actions_n;
7831                        /* Meter action will add one more TAG action. */
7832                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
7833                        break;
7834                case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7835                        if (!attr->transfer && !attr->group)
7836                                return rte_flow_error_set(error, ENOTSUP,
7837                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7838                                                NULL,
7839                                                "Shared ASO age action is not supported for group 0");
7840                        if (action_flags & MLX5_FLOW_ACTION_AGE)
7841                                return rte_flow_error_set
7842                                                  (error, EINVAL,
7843                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7844                                                   NULL,
7845                                                   "duplicate age actions set");
7846                        action_flags |= MLX5_FLOW_ACTION_AGE;
7847                        ++actions_n;
7848                        break;
7849                case RTE_FLOW_ACTION_TYPE_AGE:
7850                        non_shared_age = actions->conf;
7851                        ret = flow_dv_validate_action_age(action_flags,
7852                                                          actions, dev,
7853                                                          error);
7854                        if (ret < 0)
7855                                return ret;
7856                        /*
7857                         * Validate mutual exclusion of the regular AGE action
7858                         * (using a counter) with indirect counter actions.
7859                         */
7860                        if (!flow_hit_aso_supported(priv->sh, attr)) {
7861                                if (shared_count)
7862                                        return rte_flow_error_set
7863                                                (error, EINVAL,
7864                                                RTE_FLOW_ERROR_TYPE_ACTION,
7865                                                NULL,
7866                                                "old age and indirect count combination is not supported");
7867                                if (sample_count)
7868                                        return rte_flow_error_set
7869                                                (error, EINVAL,
7870                                                RTE_FLOW_ERROR_TYPE_ACTION,
7871                                                NULL,
7872                                                "old age action and count must be in the same sub flow");
7873                        }
7874                        action_flags |= MLX5_FLOW_ACTION_AGE;
7875                        ++actions_n;
7876                        break;
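                    /*
                     * When the ASO flow-hit object is unsupported for these
                     * attributes (flow_hit_aso_supported() above), AGE falls
                     * back to counter-based aging, hence the clash checks
                     * against indirect and sample-stage counters.
                     */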
7877                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7878                        ret = flow_dv_validate_action_modify_ipv4_dscp
7879                                                         (action_flags,
7880                                                          actions,
7881                                                          item_flags,
7882                                                          error);
7883                        if (ret < 0)
7884                                return ret;
7885                        /* Count all modify-header actions as one action. */
7886                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7887                                ++actions_n;
7888                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7889                                modify_after_mirror = 1;
7890                        action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7891                        rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7892                        break;
7893                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7894                        ret = flow_dv_validate_action_modify_ipv6_dscp
7895                                                                (action_flags,
7896                                                                 actions,
7897                                                                 item_flags,
7898                                                                 error);
7899                        if (ret < 0)
7900                                return ret;
7901                        /* Count all modify-header actions as one action. */
7902                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7903                                ++actions_n;
7904                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7905                                modify_after_mirror = 1;
7906                        action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7907                        rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7908                        break;
7909                case RTE_FLOW_ACTION_TYPE_SAMPLE:
7910                        ret = flow_dv_validate_action_sample(&action_flags,
7911                                                             actions, dev,
7912                                                             attr, item_flags,
7913                                                             rss, &sample_rss,
7914                                                             &sample_count,
7915                                                             &fdb_mirror_limit,
7916                                                             error);
7917                        if (ret < 0)
7918                                return ret;
7919                        if ((action_flags & MLX5_FLOW_ACTION_SET_TAG) &&
7920                            tag_id == 0 && priv->mtr_color_reg == REG_NON)
7921                                return rte_flow_error_set(error, EINVAL,
7922                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7923                                        "sample after tag action causes metadata tag index 0 corruption");
7924                        action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7925                        ++actions_n;
7926                        break;
7927                case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7928                        ret = flow_dv_validate_action_modify_field(dev,
7929                                                                   action_flags,
7930                                                                   actions,
7931                                                                   attr,
7932                                                                   error);
7933                        if (ret < 0)
7934                                return ret;
7935                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7936                                modify_after_mirror = 1;
7937                        /* Count all modify-header actions as one action. */
7938                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7939                                ++actions_n;
7940                        action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7941                        rw_act_num += ret;
7942                        break;
7943                case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7944                        ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7945                                                             item_flags, attr,
7946                                                             error);
7947                        if (ret < 0)
7948                                return ret;
7949                        action_flags |= MLX5_FLOW_ACTION_CT;
7950                        break;
7951                case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7952                        /* tunnel offload action was processed before;
7953                         * list it here as a supported type.
7954                         */
7955                        break;
7956                default:
7957                        return rte_flow_error_set(error, ENOTSUP,
7958                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7959                                                  actions,
7960                                                  "action not supported");
7961                }
7962        }
7963        /*
7964         * Validate actions in tunnel offload flow rules:
7965         * - Explicit decap action is prohibited by the tunnel offload API.
7966         * - Drop action in a tunnel steer rule is prohibited by the API.
7967         * - Application cannot use MARK action because its value can mask
7968         *   the tunnel default miss notification.
7969         * - JUMP in a tunnel match rule has no support in the current PMD
7970         *   implementation.
7971         * - TAG & META are reserved for future use.
7972         */
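            /*
             * Illustrative only: a tunnel set (steer) rule takes its
             * PMD-provided actions from rte_flow_tunnel_decap_set() and must
             * terminate with JUMP, e.g.
             *   <pmd tunnel actions> / jump group X / end
             * while adding DECAP, MARK, SET_TAG, SET_META or DROP to the
             * same rule is rejected below.
             */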
7973        if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7974                uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7975                                            MLX5_FLOW_ACTION_MARK     |
7976                                            MLX5_FLOW_ACTION_SET_TAG  |
7977                                            MLX5_FLOW_ACTION_SET_META |
7978                                            MLX5_FLOW_ACTION_DROP;
7979
7980                if (action_flags & bad_actions_mask)
7981                        return rte_flow_error_set
7982                                        (error, EINVAL,
7983                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7984                                        "Invalid RTE action in tunnel "
7985                                        "set decap rule");
7986                if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7987                        return rte_flow_error_set
7988                                        (error, EINVAL,
7989                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7990                                        "tunnel set decap rule must terminate "
7991                                        "with JUMP");
7992                if (!attr->ingress)
7993                        return rte_flow_error_set
7994                                        (error, EINVAL,
7995                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7996                                        "tunnel flows for ingress traffic only");
7997        }
7998        if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7999                uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
8000                                            MLX5_FLOW_ACTION_MARK    |
8001                                            MLX5_FLOW_ACTION_SET_TAG |
8002                                            MLX5_FLOW_ACTION_SET_META;
8003
8004                if (action_flags & bad_actions_mask)
8005                        return rte_flow_error_set
8006                                        (error, EINVAL,
8007                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8008                                        "Invalid RTE action in tunnel "
8009                                        "set match rule");
8010        }
8011        /*
8012         * Validate the drop action mutual exclusion with other actions.
8013         * Drop action is mutually-exclusive with any other action, except for
8014         * Count action.
8015         * Drop action compatibility with tunnel offload was already validated.
8016         */
8017        if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
8018                            MLX5_FLOW_ACTION_TUNNEL_SET));
8019        else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
8020            (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
8021                return rte_flow_error_set(error, EINVAL,
8022                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8023                                          "Drop action is mutually-exclusive "
8024                                          "with any other action, except for "
8025                                          "Count action");
8026        /* Eswitch has a few restrictions on using items and actions. */
8027        if (attr->transfer) {
8028                if (!mlx5_flow_ext_mreg_supported(dev) &&
8029                    action_flags & MLX5_FLOW_ACTION_FLAG)
8030                        return rte_flow_error_set(error, ENOTSUP,
8031                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8032                                                  NULL,
8033                                                  "unsupported action FLAG");
8034                if (!mlx5_flow_ext_mreg_supported(dev) &&
8035                    action_flags & MLX5_FLOW_ACTION_MARK)
8036                        return rte_flow_error_set(error, ENOTSUP,
8037                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8038                                                  NULL,
8039                                                  "unsupported action MARK");
8040                if (action_flags & MLX5_FLOW_ACTION_QUEUE)
8041                        return rte_flow_error_set(error, ENOTSUP,
8042                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8043                                                  NULL,
8044                                                  "unsupported action QUEUE");
8045                if (action_flags & MLX5_FLOW_ACTION_RSS)
8046                        return rte_flow_error_set(error, ENOTSUP,
8047                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8048                                                  NULL,
8049                                                  "unsupported action RSS");
8050                if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
8051                        return rte_flow_error_set(error, EINVAL,
8052                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8053                                                  actions,
8054                                                  "no fate action is found");
8055        } else {
8056                if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
8057                        return rte_flow_error_set(error, EINVAL,
8058                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8059                                                  actions,
8060                                                  "no fate action is found");
8061        }
8062        /*
8063         * Continue validation for Xcap and VLAN actions.
8064         * If hairpin works in explicit TX rule mode, there is no action
8065         * splitting and the validation of hairpin ingress flows should be
8066         * the same as for other standard flows.
8067         */
8068        if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
8069                             MLX5_FLOW_VLAN_ACTIONS)) &&
8070            (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index) ||
8071             ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
8072             conf->tx_explicit != 0))) {
8073                if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
8074                    MLX5_FLOW_XCAP_ACTIONS)
8075                        return rte_flow_error_set(error, ENOTSUP,
8076                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8077                                                  NULL, "encap and decap "
8078                                                  "combination is not supported");
8079                /* Push VLAN is not supported in ingress except for NICs newer than CX5. */
8080                if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
8081                        struct mlx5_dev_ctx_shared *sh = priv->sh;
8082                        bool direction_error = false;
8083
8084                        if (attr->transfer) {
8085                                bool fdb_tx = priv->representor_id != UINT16_MAX;
8086                                bool is_cx5 = sh->steering_format_version ==
8087                                    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
8088
8089                                if (!fdb_tx && is_cx5)
8090                                        direction_error = true;
8091                        } else if (attr->ingress) {
8092                                direction_error = true;
8093                        }
8094                        if (direction_error)
8095                                return rte_flow_error_set(error, ENOTSUP,
8096                                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
8097                                                          NULL,
8098                                                          "push VLAN action not supported "
8099                                                          "for ingress");
8100                }
8101                if (!attr->transfer && attr->ingress) {
8102                        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
8103                                return rte_flow_error_set
8104                                                (error, ENOTSUP,
8105                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8106                                                 NULL, "encap is not supported"
8107                                                 " for ingress traffic");
8108                        else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
8109                                        MLX5_FLOW_VLAN_ACTIONS)
8110                                return rte_flow_error_set
8111                                                (error, ENOTSUP,
8112                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8113                                                 NULL, "no support for "
8114                                                 "multiple VLAN actions");
8115                }
8116        }
8117        if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
8118                if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
8119                        ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
8120                        attr->ingress)
8121                        return rte_flow_error_set
8122                                (error, ENOTSUP,
8123                                RTE_FLOW_ERROR_TYPE_ACTION,
8124                                NULL, "fate action not supported for "
8125                                "meter with policy");
8126                if (attr->egress) {
8127                        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
8128                                return rte_flow_error_set
8129                                        (error, ENOTSUP,
8130                                        RTE_FLOW_ERROR_TYPE_ACTION,
8131                                        NULL, "modify header action in egress "
8132                                        "cannot be done before meter action");
8133                        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
8134                                return rte_flow_error_set
8135                                        (error, ENOTSUP,
8136                                        RTE_FLOW_ERROR_TYPE_ACTION,
8137                                        NULL, "encap action in egress "
8138                                        "cannot be done before meter action");
8139                        if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8140                                return rte_flow_error_set
8141                                        (error, ENOTSUP,
8142                                        RTE_FLOW_ERROR_TYPE_ACTION,
8143                                        NULL, "push vlan action in egress "
8144                                        "cannot be done before meter action");
8145                }
8146        }
8147        /*
8148         * Only support one ASO action in a single flow rule.
8149         * Non-shared AGE + counter falls back to the HW counter; no ASO hit object.
8150         * Group 0 uses the HW counter for AGE too, even if there is no counter action.
8151         */
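            /* aso_mask bit layout below: bit 2 - METER, bit 1 - CT, bit 0 - AGE. */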
8152        aso_mask = (action_flags & MLX5_FLOW_ACTION_METER && priv->sh->meter_aso_en) << 2 |
8153                   (action_flags & MLX5_FLOW_ACTION_CT && priv->sh->ct_aso_en) << 1 |
8154                   (action_flags & MLX5_FLOW_ACTION_AGE &&
8155                    !(non_shared_age && count) &&
8156                    (attr->group || (attr->transfer && priv->fdb_def_rule)) &&
8157                    priv->sh->flow_hit_aso_en);
8158        if (__builtin_popcountl(aso_mask) > 1)
8159                return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
8160                                          NULL, "combining AGE, METER and CT ASO actions in a single rule is not supported");
8161        /*
8162         * Hairpin flow will add one more TAG action in TX implicit mode.
8163         * In TX explicit mode, there will be no hairpin flow ID.
8164         */
8165        if (hairpin > 0)
8166                rw_act_num += MLX5_ACT_NUM_SET_TAG;
8167        /* Extra metadata enabled: one more TAG action will be added. */
8168        if (dev_conf->dv_flow_en &&
8169            dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
8170            mlx5_flow_ext_mreg_supported(dev))
8171                rw_act_num += MLX5_ACT_NUM_SET_TAG;
8172        if (rw_act_num >
8173            flow_dv_modify_hdr_action_max(dev, is_root)) {
8174                return rte_flow_error_set(error, ENOTSUP,
8175                                          RTE_FLOW_ERROR_TYPE_ACTION,
8176                                          NULL, "too many header modify"
8177                                          " actions to support");
8178        }
8179        /* Eswitch egress mirror and modify flows have a limitation on CX5. */
8180        if (fdb_mirror_limit && modify_after_mirror)
8181                return rte_flow_error_set(error, EINVAL,
8182                                RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8183                                "sample before modify action is not supported");
        /*
         * Validate the NIC egress flow on a representor. Except for the
         * implicit hairpin default egress flow with the TX_QUEUE item,
         * other flows do not work due to metadata regC0 mismatch.
         */
8189        if ((!attr->transfer && attr->egress) && priv->representor &&
8190            !(item_flags & MLX5_FLOW_ITEM_TX_QUEUE))
8191                return rte_flow_error_set(error, EINVAL,
8192                                          RTE_FLOW_ERROR_TYPE_ITEM,
8193                                          NULL,
8194                                          "NIC egress rules on representors"
                                          " are not supported");
8196        return 0;
8197}
8198
8199/**
 * Internal preparation function. Allocates the DV flow size;
 * this size is constant.
8202 *
8203 * @param[in] dev
8204 *   Pointer to the rte_eth_dev structure.
8205 * @param[in] attr
8206 *   Pointer to the flow attributes.
8207 * @param[in] items
8208 *   Pointer to the list of items.
8209 * @param[in] actions
8210 *   Pointer to the list of actions.
8211 * @param[out] error
8212 *   Pointer to the error structure.
8213 *
8214 * @return
8215 *   Pointer to mlx5_flow object on success,
8216 *   otherwise NULL and rte_errno is set.
8217 */
8218static struct mlx5_flow *
8219flow_dv_prepare(struct rte_eth_dev *dev,
8220                const struct rte_flow_attr *attr __rte_unused,
8221                const struct rte_flow_item items[] __rte_unused,
8222                const struct rte_flow_action actions[] __rte_unused,
8223                struct rte_flow_error *error)
8224{
8225        uint32_t handle_idx = 0;
8226        struct mlx5_flow *dev_flow;
8227        struct mlx5_flow_handle *dev_handle;
8228        struct mlx5_priv *priv = dev->data->dev_private;
8229        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8230
8231        MLX5_ASSERT(wks);
8232        wks->skip_matcher_reg = 0;
8233        wks->policy = NULL;
8234        wks->final_policy = NULL;
        /* Guard against overflowing the temporary device flow array. */
8236        if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8237                rte_flow_error_set(error, ENOSPC,
8238                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "no free temporary device flow");
8240                return NULL;
8241        }
8242        dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8243                                   &handle_idx);
8244        if (!dev_handle) {
8245                rte_flow_error_set(error, ENOMEM,
8246                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8247                                   "not enough memory to create flow handle");
8248                return NULL;
8249        }
8250        MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8251        dev_flow = &wks->flows[wks->flow_idx++];
8252        memset(dev_flow, 0, sizeof(*dev_flow));
8253        dev_flow->handle = dev_handle;
8254        dev_flow->handle_idx = handle_idx;
8255        dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8256        dev_flow->ingress = attr->ingress;
8257        dev_flow->dv.transfer = attr->transfer;
8258        return dev_flow;
8259}
8260
8261#ifdef RTE_LIBRTE_MLX5_DEBUG
8262/**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * the kernel driver. If an unmasked bit is present in the value, it
 * returns failure.
 *
 * @param match_mask
 *   Pointer to the match mask buffer.
 * @param match_value
 *   Pointer to the match value buffer.
8270 *
8271 * @return
8272 *   0 if valid, -EINVAL otherwise.
8273 */
8274static int
8275flow_dv_check_valid_spec(void *match_mask, void *match_value)
8276{
8277        uint8_t *m = match_mask;
8278        uint8_t *v = match_value;
8279        unsigned int i;
8280
8281        for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8282                if (v[i] & ~m[i]) {
8283                        DRV_LOG(ERR,
8284                                "match_value differs from match_criteria"
8285                                " %p[%u] != %p[%u]",
8286                                match_value, i, match_mask, i);
8287                        return -EINVAL;
8288                }
8289        }
8290        return 0;
8291}
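
/*
 * Example: a match value byte of 0x1f under a mask byte of 0x0f fails
 * the check above, since bit 4 is set in the value but not in the mask.
 */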
8292#endif
8293
8294/**
8295 * Add match of ip_version.
8296 *
8297 * @param[in] group
8298 *   Flow group.
8299 * @param[in] headers_v
8300 *   Values header pointer.
8301 * @param[in] headers_m
8302 *   Masks header pointer.
8303 * @param[in] ip_version
8304 *   The IP version to set.
8305 */
8306static inline void
8307flow_dv_set_match_ip_version(uint32_t group,
8308                             void *headers_v,
8309                             void *headers_m,
8310                             uint8_t ip_version)
8311{
8312        if (group == 0)
8313                MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8314        else
8315                MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8316                         ip_version);
8317        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8318        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8319        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8320}
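
/*
 * Example: for IPv4 this sets ip_version mask 0xf with value 4 on
 * group 0 (the root table), while non-root groups use 4 for both the
 * mask and the value; the ethertype fields are cleared in both cases.
 */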
8321
8322/**
8323 * Add Ethernet item to matcher and to the value.
8324 *
8325 * @param[in, out] matcher
8326 *   Flow matcher.
8327 * @param[in, out] key
8328 *   Flow matcher value.
8329 * @param[in] item
8330 *   Flow pattern to translate.
8331 * @param[in] inner
8332 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
8334static void
8335flow_dv_translate_item_eth(void *matcher, void *key,
8336                           const struct rte_flow_item *item, int inner,
8337                           uint32_t group)
8338{
8339        const struct rte_flow_item_eth *eth_m = item->mask;
8340        const struct rte_flow_item_eth *eth_v = item->spec;
8341        const struct rte_flow_item_eth nic_mask = {
8342                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8343                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8344                .type = RTE_BE16(0xffff),
8345                .has_vlan = 0,
8346        };
8347        void *hdrs_m;
8348        void *hdrs_v;
8349        char *l24_v;
8350        unsigned int i;
8351
8352        if (!eth_v)
8353                return;
8354        if (!eth_m)
8355                eth_m = &nic_mask;
8356        if (inner) {
8357                hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8358                                         inner_headers);
8359                hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8360        } else {
8361                hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8362                                         outer_headers);
8363                hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8364        }
8365        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8366               &eth_m->dst, sizeof(eth_m->dst));
8367        /* The value must be in the range of the mask. */
8368        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8369        for (i = 0; i < sizeof(eth_m->dst); ++i)
8370                l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8371        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8372               &eth_m->src, sizeof(eth_m->src));
8373        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8374        /* The value must be in the range of the mask. */
        for (i = 0; i < sizeof(eth_m->src); ++i)
8376                l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8377        /*
8378         * HW supports match on one Ethertype, the Ethertype following the last
8379         * VLAN tag of the packet (see PRM).
8380         * Set match on ethertype only if ETH header is not followed by VLAN.
8381         * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8382         * ethertype, and use ip_version field instead.
8383         * eCPRI over Ether layer will use type value 0xAEFE.
8384         */
8385        if (eth_m->type == 0xFFFF) {
                /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8387                MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8388                switch (eth_v->type) {
8389                case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8390                        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8391                        return;
8392                case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8393                        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8394                        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8395                        return;
8396                case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8397                        flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8398                        return;
8399                case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8400                        flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8401                        return;
8402                default:
8403                        break;
8404                }
8405        }
8406        if (eth_m->has_vlan) {
8407                MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8408                if (eth_v->has_vlan) {
8409                        /*
                         * Here, when the has_more_vlan field in the VLAN item
                         * is also not set, only single-tagged packets will be
                         * matched.
8412                         */
8413                        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8414                        return;
8415                }
8416        }
8417        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8418                 rte_be_to_cpu_16(eth_m->type));
8419        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8420        *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8421}
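
/*
 * Example: an ETH item with type spec 0x0800 (IPv4) and the full
 * 0xffff mask is translated above into an ip_version == 4 match
 * rather than an ethertype match, the layout the HW is optimized for.
 */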
8422
8423/**
8424 * Add VLAN item to matcher and to the value.
8425 *
8426 * @param[in, out] dev_flow
8427 *   Flow descriptor.
8428 * @param[in, out] matcher
8429 *   Flow matcher.
8430 * @param[in, out] key
8431 *   Flow matcher value.
8432 * @param[in] item
8433 *   Flow pattern to translate.
8434 * @param[in] inner
8435 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
8437static void
8438flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8439                            void *matcher, void *key,
8440                            const struct rte_flow_item *item,
8441                            int inner, uint32_t group)
8442{
8443        const struct rte_flow_item_vlan *vlan_m = item->mask;
8444        const struct rte_flow_item_vlan *vlan_v = item->spec;
8445        void *hdrs_m;
8446        void *hdrs_v;
8447        uint16_t tci_m;
8448        uint16_t tci_v;
8449
8450        if (inner) {
8451                hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8452                                         inner_headers);
8453                hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8454        } else {
8455                hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8456                                         outer_headers);
8457                hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8458                /*
                 * This is a workaround; masks are not supported here
                 * and have been pre-validated.
8461                 */
8462                if (vlan_v)
8463                        dev_flow->handle->vf_vlan.tag =
8464                                        rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8465        }
8466        /*
8467         * When VLAN item exists in flow, mark packet as tagged,
8468         * even if TCI is not specified.
8469         */
8470        if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8471                MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8472                MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8473        }
8474        if (!vlan_v)
8475                return;
8476        if (!vlan_m)
8477                vlan_m = &rte_flow_item_vlan_mask;
8478        tci_m = rte_be_to_cpu_16(vlan_m->tci);
8479        tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8480        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8481        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8482        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8483        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8484        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8485        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8486        /*
8487         * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8488         * ethertype, and use ip_version field instead.
8489         */
8490        if (vlan_m->inner_type == 0xFFFF) {
8491                switch (vlan_v->inner_type) {
8492                case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8493                        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8494                        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8495                        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8496                        return;
8497                case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8498                        flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8499                        return;
8500                case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8501                        flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8502                        return;
8503                default:
8504                        break;
8505                }
8506        }
8507        if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8508                MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8509                MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8510                /* Only one vlan_tag bit can be set. */
8511                MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8512                return;
8513        }
8514        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8515                 rte_be_to_cpu_16(vlan_m->inner_type));
8516        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8517                 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8518}
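
/*
 * Illustration: a TCI of 0xe00a decomposes above into first_prio = 7
 * (bits 15-13), first_cfi = 0 (bit 12) and first_vid = 0x00a
 * (bits 11-0); MLX5_SET() truncates each shifted value to the
 * destination field width.
 */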
8519
8520/**
8521 * Add IPV4 item to matcher and to the value.
8522 *
8523 * @param[in, out] matcher
8524 *   Flow matcher.
8525 * @param[in, out] key
8526 *   Flow matcher value.
8527 * @param[in] item
8528 *   Flow pattern to translate.
8529 * @param[in] inner
8530 *   Item is inner pattern.
8531 * @param[in] group
8532 *   The group to insert the rule.
8533 */
8534static void
8535flow_dv_translate_item_ipv4(void *matcher, void *key,
8536                            const struct rte_flow_item *item,
8537                            int inner, uint32_t group)
8538{
8539        const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8540        const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8541        const struct rte_flow_item_ipv4 nic_mask = {
8542                .hdr = {
8543                        .src_addr = RTE_BE32(0xffffffff),
8544                        .dst_addr = RTE_BE32(0xffffffff),
8545                        .type_of_service = 0xff,
8546                        .next_proto_id = 0xff,
8547                        .time_to_live = 0xff,
8548                },
8549        };
8550        void *headers_m;
8551        void *headers_v;
8552        char *l24_m;
8553        char *l24_v;
8554        uint8_t tos, ihl_m, ihl_v;
8555
8556        if (inner) {
8557                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8558                                         inner_headers);
8559                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8560        } else {
8561                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8562                                         outer_headers);
8563                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8564        }
8565        flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8566        if (!ipv4_v)
8567                return;
8568        if (!ipv4_m)
8569                ipv4_m = &nic_mask;
8570        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8571                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8572        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8573                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8574        *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8575        *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8576        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8577                          src_ipv4_src_ipv6.ipv4_layout.ipv4);
8578        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8579                          src_ipv4_src_ipv6.ipv4_layout.ipv4);
8580        *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8581        *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8582        tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8583        ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8584        ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8585        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8586        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8587        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8588                 ipv4_m->hdr.type_of_service);
8589        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8590        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8591                 ipv4_m->hdr.type_of_service >> 2);
8592        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8593        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8594                 ipv4_m->hdr.next_proto_id);
8595        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8596                 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8597        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8598                 ipv4_m->hdr.time_to_live);
8599        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8600                 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8601        MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8602                 !!(ipv4_m->hdr.fragment_offset));
8603        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8604                 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8605}
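
/*
 * Example: a dst spec of 10.1.2.3 with mask 255.255.255.0 programs
 * the matcher mask as-is and the value as 10.1.2.0, keeping the value
 * within the mask (cf. flow_dv_check_valid_spec()).
 */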
8606
8607/**
8608 * Add IPV6 item to matcher and to the value.
8609 *
8610 * @param[in, out] matcher
8611 *   Flow matcher.
8612 * @param[in, out] key
8613 *   Flow matcher value.
8614 * @param[in] item
8615 *   Flow pattern to translate.
8616 * @param[in] inner
8617 *   Item is inner pattern.
8618 * @param[in] group
8619 *   The group to insert the rule.
8620 */
8621static void
8622flow_dv_translate_item_ipv6(void *matcher, void *key,
8623                            const struct rte_flow_item *item,
8624                            int inner, uint32_t group)
8625{
8626        const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8627        const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8628        const struct rte_flow_item_ipv6 nic_mask = {
8629                .hdr = {
8630                        .src_addr =
8631                                "\xff\xff\xff\xff\xff\xff\xff\xff"
8632                                "\xff\xff\xff\xff\xff\xff\xff\xff",
8633                        .dst_addr =
8634                                "\xff\xff\xff\xff\xff\xff\xff\xff"
8635                                "\xff\xff\xff\xff\xff\xff\xff\xff",
8636                        .vtc_flow = RTE_BE32(0xffffffff),
8637                        .proto = 0xff,
8638                        .hop_limits = 0xff,
8639                },
8640        };
8641        void *headers_m;
8642        void *headers_v;
8643        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8644        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8645        char *l24_m;
8646        char *l24_v;
8647        uint32_t vtc_m;
8648        uint32_t vtc_v;
8649        int i;
8650        int size;
8651
8652        if (inner) {
8653                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8654                                         inner_headers);
8655                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8656        } else {
8657                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8658                                         outer_headers);
8659                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8660        }
8661        flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8662        if (!ipv6_v)
8663                return;
8664        if (!ipv6_m)
8665                ipv6_m = &nic_mask;
8666        size = sizeof(ipv6_m->hdr.dst_addr);
8667        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8668                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8669        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8670                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8671        memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8672        for (i = 0; i < size; ++i)
8673                l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8674        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8675                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
8676        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8677                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
8678        memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8679        for (i = 0; i < size; ++i)
8680                l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8681        /* TOS. */
8682        vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8683        vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8684        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8685        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8686        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8687        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8688        /* Label. */
8689        if (inner) {
8690                MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8691                         vtc_m);
8692                MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8693                         vtc_v);
8694        } else {
8695                MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8696                         vtc_m);
8697                MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8698                         vtc_v);
8699        }
8700        /* Protocol. */
8701        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8702                 ipv6_m->hdr.proto);
8703        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8704                 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8705        /* Hop limit. */
8706        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8707                 ipv6_m->hdr.hop_limits);
8708        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8709                 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8710        MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8711                 !!(ipv6_m->has_frag_ext));
8712        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8713                 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8714}
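
/*
 * Illustration: after the byte swap, vtc_flow holds version (bits
 * 31-28), traffic class (27-20) and flow label (19-0), so vtc >> 20
 * exposes the TC byte (MLX5_SET() keeps its low two ECN bits),
 * vtc >> 22 the six DSCP bits, and the flow label field takes the
 * low 20 bits directly.
 */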
8715
8716/**
8717 * Add IPV6 fragment extension item to matcher and to the value.
8718 *
8719 * @param[in, out] matcher
8720 *   Flow matcher.
8721 * @param[in, out] key
8722 *   Flow matcher value.
8723 * @param[in] item
8724 *   Flow pattern to translate.
8725 * @param[in] inner
8726 *   Item is inner pattern.
8727 */
8728static void
8729flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8730                                     const struct rte_flow_item *item,
8731                                     int inner)
8732{
8733        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8734        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8735        const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8736                .hdr = {
8737                        .next_header = 0xff,
8738                        .frag_data = RTE_BE16(0xffff),
8739                },
8740        };
8741        void *headers_m;
8742        void *headers_v;
8743
8744        if (inner) {
8745                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8746                                         inner_headers);
8747                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8748        } else {
8749                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8750                                         outer_headers);
8751                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8752        }
        /* IPv6 fragment extension item exists, so the packet is an IP fragment. */
8754        MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8755        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8756        if (!ipv6_frag_ext_v)
8757                return;
8758        if (!ipv6_frag_ext_m)
8759                ipv6_frag_ext_m = &nic_mask;
8760        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8761                 ipv6_frag_ext_m->hdr.next_header);
8762        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8763                 ipv6_frag_ext_v->hdr.next_header &
8764                 ipv6_frag_ext_m->hdr.next_header);
8765}
8766
8767/**
8768 * Add TCP item to matcher and to the value.
8769 *
8770 * @param[in, out] matcher
8771 *   Flow matcher.
8772 * @param[in, out] key
8773 *   Flow matcher value.
8774 * @param[in] item
8775 *   Flow pattern to translate.
8776 * @param[in] inner
8777 *   Item is inner pattern.
8778 */
8779static void
8780flow_dv_translate_item_tcp(void *matcher, void *key,
8781                           const struct rte_flow_item *item,
8782                           int inner)
8783{
8784        const struct rte_flow_item_tcp *tcp_m = item->mask;
8785        const struct rte_flow_item_tcp *tcp_v = item->spec;
8786        void *headers_m;
8787        void *headers_v;
8788
8789        if (inner) {
8790                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8791                                         inner_headers);
8792                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8793        } else {
8794                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8795                                         outer_headers);
8796                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8797        }
8798        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8799        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8800        if (!tcp_v)
8801                return;
8802        if (!tcp_m)
8803                tcp_m = &rte_flow_item_tcp_mask;
8804        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8805                 rte_be_to_cpu_16(tcp_m->hdr.src_port));
8806        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8807                 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8808        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8809                 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8810        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8811                 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8812        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8813                 tcp_m->hdr.tcp_flags);
8814        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8815                 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8816}
8817
8818/**
8819 * Add ESP item to matcher and to the value.
8820 *
8821 * @param[in, out] matcher
8822 *   Flow matcher.
8823 * @param[in, out] key
8824 *   Flow matcher value.
8825 * @param[in] item
8826 *   Flow pattern to translate.
8827 * @param[in] inner
8828 *   Item is inner pattern.
8829 */
8830static void
8831flow_dv_translate_item_esp(void *matcher, void *key,
8832                           const struct rte_flow_item *item,
8833                           int inner)
8834{
8835        const struct rte_flow_item_esp *esp_m = item->mask;
8836        const struct rte_flow_item_esp *esp_v = item->spec;
8837        void *headers_m;
8838        void *headers_v;
8839        char *spi_m;
8840        char *spi_v;
8841
8842        if (inner) {
8843                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8844                                         inner_headers);
8845                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8846        } else {
8847                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8848                                         outer_headers);
8849                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8850        }
8851        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8852        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ESP);
8853        if (!esp_v)
8854                return;
8855        if (!esp_m)
8856                esp_m = &rte_flow_item_esp_mask;
8857        headers_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8858        headers_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8859        if (inner) {
8860                spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, inner_esp_spi);
8861                spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, inner_esp_spi);
8862        } else {
8863                spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, outer_esp_spi);
8864                spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, outer_esp_spi);
8865        }
8866        *(uint32_t *)spi_m = esp_m->hdr.spi;
8867        *(uint32_t *)spi_v = esp_m->hdr.spi & esp_v->hdr.spi;
8868}
8869
8870/**
8871 * Add UDP item to matcher and to the value.
8872 *
8873 * @param[in, out] matcher
8874 *   Flow matcher.
8875 * @param[in, out] key
8876 *   Flow matcher value.
8877 * @param[in] item
8878 *   Flow pattern to translate.
8879 * @param[in] inner
8880 *   Item is inner pattern.
8881 */
8882static void
8883flow_dv_translate_item_udp(void *matcher, void *key,
8884                           const struct rte_flow_item *item,
8885                           int inner)
8886{
8887        const struct rte_flow_item_udp *udp_m = item->mask;
8888        const struct rte_flow_item_udp *udp_v = item->spec;
8889        void *headers_m;
8890        void *headers_v;
8891
8892        if (inner) {
8893                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8894                                         inner_headers);
8895                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8896        } else {
8897                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8898                                         outer_headers);
8899                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8900        }
8901        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8902        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8903        if (!udp_v)
8904                return;
8905        if (!udp_m)
8906                udp_m = &rte_flow_item_udp_mask;
8907        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8908                 rte_be_to_cpu_16(udp_m->hdr.src_port));
8909        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8910                 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8911        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8912                 rte_be_to_cpu_16(udp_m->hdr.dst_port));
8913        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8914                 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8915}
8916
8917/**
8918 * Add GRE optional Key item to matcher and to the value.
8919 *
8920 * @param[in, out] matcher
8921 *   Flow matcher.
8922 * @param[in, out] key
8923 *   Flow matcher value.
8924 * @param[in] item
8925 *   Flow pattern to translate.
8928 */
8929static void
8930flow_dv_translate_item_gre_key(void *matcher, void *key,
8931                                   const struct rte_flow_item *item)
8932{
8933        const rte_be32_t *key_m = item->mask;
8934        const rte_be32_t *key_v = item->spec;
8935        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8936        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8937        rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8938
8939        /* GRE K bit must be on and should already be validated */
8940        MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8941        MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8942        if (!key_v)
8943                return;
8944        if (!key_m)
8945                key_m = &gre_key_default_mask;
8946        MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8947                 rte_be_to_cpu_32(*key_m) >> 8);
8948        MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8949                 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8950        MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8951                 rte_be_to_cpu_32(*key_m) & 0xFF);
8952        MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8953                 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8954}
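
/*
 * Example: a GRE key of 0x01020304 is split above into gre_key_h =
 * 0x010203 (upper 24 bits) and gre_key_l = 0x04 (lower 8 bits),
 * matching the misc parameter layout.
 */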
8955
8956/**
8957 * Add GRE item to matcher and to the value.
8958 *
8959 * @param[in, out] matcher
8960 *   Flow matcher.
8961 * @param[in, out] key
8962 *   Flow matcher value.
8963 * @param[in] item
8964 *   Flow pattern to translate.
8965 * @param[in] pattern_flags
8966 *   Accumulated pattern flags.
8967 */
8968static void
8969flow_dv_translate_item_gre(void *matcher, void *key,
8970                           const struct rte_flow_item *item,
8971                           uint64_t pattern_flags)
8972{
8973        static const struct rte_flow_item_gre empty_gre = {0,};
8974        const struct rte_flow_item_gre *gre_m = item->mask;
8975        const struct rte_flow_item_gre *gre_v = item->spec;
8976        void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8977        void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8978        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8979        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8980        struct {
8981                union {
8982                        __extension__
8983                        struct {
8984                                uint16_t version:3;
8985                                uint16_t rsvd0:9;
8986                                uint16_t s_present:1;
8987                                uint16_t k_present:1;
8988                                uint16_t rsvd_bit1:1;
8989                                uint16_t c_present:1;
8990                        };
8991                        uint16_t value;
8992                };
8993        } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8994        uint16_t protocol_m, protocol_v;
8995
8996        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8997        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8998        if (!gre_v) {
8999                gre_v = &empty_gre;
9000                gre_m = &empty_gre;
9001        } else {
9002                if (!gre_m)
9003                        gre_m = &rte_flow_item_gre_mask;
9004        }
9005        gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
9006        gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
9007        MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
9008                 gre_crks_rsvd0_ver_m.c_present);
9009        MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
9010                 gre_crks_rsvd0_ver_v.c_present &
9011                 gre_crks_rsvd0_ver_m.c_present);
9012        MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
9013                 gre_crks_rsvd0_ver_m.k_present);
9014        MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
9015                 gre_crks_rsvd0_ver_v.k_present &
9016                 gre_crks_rsvd0_ver_m.k_present);
9017        MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
9018                 gre_crks_rsvd0_ver_m.s_present);
9019        MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
9020                 gre_crks_rsvd0_ver_v.s_present &
9021                 gre_crks_rsvd0_ver_m.s_present);
9022        protocol_m = rte_be_to_cpu_16(gre_m->protocol);
9023        protocol_v = rte_be_to_cpu_16(gre_v->protocol);
9024        if (!protocol_m) {
                /* Force next protocol to prevent matcher duplication. */
9026                protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
9027                if (protocol_v)
9028                        protocol_m = 0xFFFF;
9029        }
9030        MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
9031        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9032                 protocol_m & protocol_v);
9033}
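
/*
 * Illustration: after rte_be_to_cpu_16() the first GRE word reads
 * C (bit 15) | reserved (14) | K (13) | S (12) | rsvd0 (11-3) |
 * version (2-0), which is what the bitfield union above decodes;
 * e.g. 0x2000 means only the K (key present) bit is set.
 */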
9034
9035/**
9036 * Add GRE optional items to matcher and to the value.
9037 *
9038 * @param[in, out] matcher
9039 *   Flow matcher.
9040 * @param[in, out] key
9041 *   Flow matcher value.
9042 * @param[in] item
9043 *   Flow pattern to translate.
9044 * @param[in] gre_item
9045 *   Pointer to gre_item.
9046 * @param[in] pattern_flags
9047 *   Accumulated pattern flags.
9048 */
9049static void
9050flow_dv_translate_item_gre_option(void *matcher, void *key,
9051                                  const struct rte_flow_item *item,
9052                                  const struct rte_flow_item *gre_item,
9053                                  uint64_t pattern_flags)
9054{
9055        const struct rte_flow_item_gre_opt *option_m = item->mask;
9056        const struct rte_flow_item_gre_opt *option_v = item->spec;
9057        const struct rte_flow_item_gre *gre_m = gre_item->mask;
9058        const struct rte_flow_item_gre *gre_v = gre_item->spec;
9059        static const struct rte_flow_item_gre empty_gre = {0};
9060        struct rte_flow_item gre_key_item;
9061        uint16_t c_rsvd0_ver_m, c_rsvd0_ver_v;
9062        uint16_t protocol_m, protocol_v;
9063        void *misc5_m;
9064        void *misc5_v;
9065
9066        /*
         * If only the key field is matched, keep using misc for matching.
         * If checksum or sequence needs to be matched, use misc5 instead;
         * misc is then not needed.
9070         */
9071        if (!(option_m->sequence.sequence ||
9072              option_m->checksum_rsvd.checksum)) {
9073                flow_dv_translate_item_gre(matcher, key, gre_item,
9074                                           pattern_flags);
9075                gre_key_item.spec = &option_v->key.key;
9076                gre_key_item.mask = &option_m->key.key;
9077                flow_dv_translate_item_gre_key(matcher, key, &gre_key_item);
9078                return;
9079        }
9080        if (!gre_v) {
9081                gre_v = &empty_gre;
9082                gre_m = &empty_gre;
9083        } else {
9084                if (!gre_m)
9085                        gre_m = &rte_flow_item_gre_mask;
9086        }
9087        protocol_v = gre_v->protocol;
9088        protocol_m = gre_m->protocol;
9089        if (!protocol_m) {
                /* Force next protocol to prevent matcher duplication. */
9091                uint16_t ether_type =
9092                        mlx5_translate_tunnel_etypes(pattern_flags);
9093                if (ether_type) {
9094                        protocol_v = rte_be_to_cpu_16(ether_type);
9095                        protocol_m = UINT16_MAX;
9096                }
9097        }
9098        c_rsvd0_ver_v = gre_v->c_rsvd0_ver;
9099        c_rsvd0_ver_m = gre_m->c_rsvd0_ver;
9100        if (option_m->sequence.sequence) {
9101                c_rsvd0_ver_v |= RTE_BE16(0x1000);
9102                c_rsvd0_ver_m |= RTE_BE16(0x1000);
9103        }
9104        if (option_m->key.key) {
9105                c_rsvd0_ver_v |= RTE_BE16(0x2000);
9106                c_rsvd0_ver_m |= RTE_BE16(0x2000);
9107        }
9108        if (option_m->checksum_rsvd.checksum) {
9109                c_rsvd0_ver_v |= RTE_BE16(0x8000);
9110                c_rsvd0_ver_m |= RTE_BE16(0x8000);
9111        }
9112        /*
         * Hardware parses the GRE optional fields into fixed locations,
         * so there is no need to adjust the tunnel dword indices.
9115         */
9116        misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
9117        misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
9118        MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_0,
9119                 rte_be_to_cpu_32((c_rsvd0_ver_v | protocol_v << 16) &
9120                                  (c_rsvd0_ver_m | protocol_m << 16)));
9121        MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_0,
9122                 rte_be_to_cpu_32(c_rsvd0_ver_m | protocol_m << 16));
9123        MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_1,
9124                 rte_be_to_cpu_32(option_v->checksum_rsvd.checksum &
9125                                  option_m->checksum_rsvd.checksum));
9126        MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_1,
9127                 rte_be_to_cpu_32(option_m->checksum_rsvd.checksum));
9128        MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_2,
9129                 rte_be_to_cpu_32(option_v->key.key & option_m->key.key));
9130        MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_2,
9131                 rte_be_to_cpu_32(option_m->key.key));
9132        MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_3,
9133                 rte_be_to_cpu_32(option_v->sequence.sequence &
9134                                  option_m->sequence.sequence));
9135        MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_3,
9136                 rte_be_to_cpu_32(option_m->sequence.sequence));
9137}
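
/*
 * Note: tunnel_header_0 carries the first GRE word (the big-endian
 * c_rsvd0_ver flags in its low 16 bits and the protocol in the high
 * 16 bits, byte-swapped as one 32-bit field), while headers 1-3 hold
 * the optional checksum, key and sequence words respectively.
 */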
9138
9139/**
9140 * Add NVGRE item to matcher and to the value.
9141 *
9142 * @param[in, out] matcher
9143 *   Flow matcher.
9144 * @param[in, out] key
9145 *   Flow matcher value.
9146 * @param[in] item
9147 *   Flow pattern to translate.
9148 * @param[in] pattern_flags
9149 *   Accumulated pattern flags.
9150 */
9151static void
9152flow_dv_translate_item_nvgre(void *matcher, void *key,
9153                             const struct rte_flow_item *item,
9154                             unsigned long pattern_flags)
9155{
9156        const struct rte_flow_item_nvgre *nvgre_m = item->mask;
9157        const struct rte_flow_item_nvgre *nvgre_v = item->spec;
9158        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9159        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9160        const char *tni_flow_id_m;
9161        const char *tni_flow_id_v;
9162        char *gre_key_m;
9163        char *gre_key_v;
9164        int size;
9165        int i;
9166
9167        /* For NVGRE, GRE header fields must be set with defined values. */
9168        const struct rte_flow_item_gre gre_spec = {
9169                .c_rsvd0_ver = RTE_BE16(0x2000),
9170                .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
9171        };
9172        const struct rte_flow_item_gre gre_mask = {
9173                .c_rsvd0_ver = RTE_BE16(0xB000),
9174                .protocol = RTE_BE16(UINT16_MAX),
9175        };
9176        const struct rte_flow_item gre_item = {
9177                .spec = &gre_spec,
9178                .mask = &gre_mask,
9179                .last = NULL,
9180        };
9181        flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
9182        if (!nvgre_v)
9183                return;
9184        if (!nvgre_m)
9185                nvgre_m = &rte_flow_item_nvgre_mask;
9186        tni_flow_id_m = (const char *)nvgre_m->tni;
9187        tni_flow_id_v = (const char *)nvgre_v->tni;
9188        size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
9189        gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
9190        gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
9191        memcpy(gre_key_m, tni_flow_id_m, size);
9192        for (i = 0; i < size; ++i)
9193                gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
9194}
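
/*
 * Illustration: NVGRE reuses the 32-bit GRE key match field, with the
 * 24-bit TNI in the upper three bytes and the 8-bit flow_id in the
 * lowest byte, so both are copied above as a single 4-byte region
 * starting at gre_key_h.
 */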
9195
9196/**
9197 * Add VXLAN item to matcher and to the value.
9198 *
9199 * @param[in] dev
9200 *   Pointer to the Ethernet device structure.
9201 * @param[in] attr
9202 *   Flow rule attributes.
9203 * @param[in, out] matcher
9204 *   Flow matcher.
9205 * @param[in, out] key
9206 *   Flow matcher value.
9207 * @param[in] item
9208 *   Flow pattern to translate.
9209 * @param[in] inner
9210 *   Item is inner pattern.
9211 */
9212static void
9213flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
9214                             const struct rte_flow_attr *attr,
9215                             void *matcher, void *key,
9216                             const struct rte_flow_item *item,
9217                             int inner)
9218{
9219        const struct rte_flow_item_vxlan *vxlan_m = item->mask;
9220        const struct rte_flow_item_vxlan *vxlan_v = item->spec;
9221        void *headers_m;
9222        void *headers_v;
9223        void *misc5_m;
9224        void *misc5_v;
9225        uint32_t *tunnel_header_v;
9226        uint32_t *tunnel_header_m;
9227        uint16_t dport;
9228        struct mlx5_priv *priv = dev->data->dev_private;
9229        const struct rte_flow_item_vxlan nic_mask = {
9230                .vni = "\xff\xff\xff",
9231                .rsvd1 = 0xff,
9232        };
9233
9234        if (inner) {
9235                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9236                                         inner_headers);
9237                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9238        } else {
9239                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9240                                         outer_headers);
9241                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9242        }
9243        dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
9244                MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
9245        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9246                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9247                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9248        }
9249        dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
9250        if (!vxlan_v)
9251                return;
9252        if (!vxlan_m) {
9253                if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
9254                    (attr->group && !priv->sh->misc5_cap))
9255                        vxlan_m = &rte_flow_item_vxlan_mask;
9256                else
9257                        vxlan_m = &nic_mask;
9258        }
9259        if ((priv->sh->steering_format_version ==
9260            MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
9261            dport != MLX5_UDP_PORT_VXLAN) ||
9262            (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
9263            ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
9264                void *misc_m;
9265                void *misc_v;
9266                char *vni_m;
9267                char *vni_v;
9268                int size;
9269                int i;
9270                misc_m = MLX5_ADDR_OF(fte_match_param,
9271                                      matcher, misc_parameters);
9272                misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9273                size = sizeof(vxlan_m->vni);
9274                vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
9275                vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
9276                memcpy(vni_m, vxlan_m->vni, size);
9277                for (i = 0; i < size; ++i)
9278                        vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9279                return;
9280        }
9281        misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
9282        misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
9283        tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
9284                                                   misc5_v,
9285                                                   tunnel_header_1);
9286        tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
9287                                                   misc5_m,
9288                                                   tunnel_header_1);
9289        *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
9290                           (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
9291                           (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
9292        if (*tunnel_header_v)
9293                *tunnel_header_m = vxlan_m->vni[0] |
9294                        vxlan_m->vni[1] << 8 |
9295                        vxlan_m->vni[2] << 16;
9296        else
9297                *tunnel_header_m = 0x0;
9298        *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
9299        if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
9300                *tunnel_header_m |= vxlan_m->rsvd1 << 24;
9301}
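
/*
 * Example: on the misc5 path the VNI bytes land in bits 0-23 of
 * tunnel_header_1 (vni[0] lowest) and the rsvd1 byte in bits 24-31;
 * the VNI mask bytes are programmed only when the masked VNI value
 * is non-zero.
 */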
9302
9303/**
9304 * Add VXLAN-GPE item to matcher and to the value.
9305 *
9306 * @param[in, out] matcher
9307 *   Flow matcher.
9308 * @param[in, out] key
9309 *   Flow matcher value.
9310 * @param[in] item
9311 *   Flow pattern to translate.
 * @param[in] pattern_flags
 *   Accumulated pattern flags.
 */
9316static void
9317flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
9318                                 const struct rte_flow_item *item,
9319                                 const uint64_t pattern_flags)
9320{
9321        static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
9322        const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
9323        const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
9324        /* The item was validated to be on the outer side */
9325        void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9326        void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9327        void *misc_m =
9328                MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
9329        void *misc_v =
9330                MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9331        char *vni_m =
9332                MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9333        char *vni_v =
9334                MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9335        int i, size = sizeof(vxlan_m->vni);
9336        uint8_t flags_m = 0xff;
9337        uint8_t flags_v = 0xc;
9338        uint8_t m_protocol, v_protocol;
9339
9340        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9341                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9342                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9343                         MLX5_UDP_PORT_VXLAN_GPE);
9344        }
9345        if (!vxlan_v) {
9346                vxlan_v = &dummy_vxlan_gpe_hdr;
9347                vxlan_m = &dummy_vxlan_gpe_hdr;
9348        } else {
9349                if (!vxlan_m)
9350                        vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9351        }
9352        memcpy(vni_m, vxlan_m->vni, size);
9353        for (i = 0; i < size; ++i)
9354                vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9355        if (vxlan_m->flags) {
9356                flags_m = vxlan_m->flags;
9357                flags_v = vxlan_v->flags;
9358        }
9359        MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9360        MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9361        m_protocol = vxlan_m->protocol;
9362        v_protocol = vxlan_v->protocol;
9363        if (!m_protocol) {
                /* Force next protocol to ensure the inner headers are parsed. */
9365                if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
9366                        v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
9367                else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
9368                        v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
9369                else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
9370                        v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
9371                if (v_protocol)
9372                        m_protocol = 0xFF;
9373        }
9374        MLX5_SET(fte_match_set_misc3, misc_m,
9375                 outer_vxlan_gpe_next_protocol, m_protocol);
9376        MLX5_SET(fte_match_set_misc3, misc_v,
9377                 outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
9378}
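
/*
 * Example: a VXLAN-GPE item with no protocol mask followed by an
 * inner IPv4 item gets next_protocol forced above to
 * RTE_VXLAN_GPE_TYPE_IPV4 with a full 0xff mask, so the inner
 * headers are still parsed and matched correctly.
 */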
9379
9380/**
9381 * Add Geneve item to matcher and to the value.
9382 *
9383 * @param[in, out] matcher
9384 *   Flow matcher.
9385 * @param[in, out] key
9386 *   Flow matcher value.
9387 * @param[in] item
9388 *   Flow pattern to translate.
 * @param[in] pattern_flags
 *   Accumulated pattern flags.
 */
9393static void
9394flow_dv_translate_item_geneve(void *matcher, void *key,
9395                              const struct rte_flow_item *item,
9396                              uint64_t pattern_flags)
9397{
9398        static const struct rte_flow_item_geneve empty_geneve = {0,};
9399        const struct rte_flow_item_geneve *geneve_m = item->mask;
9400        const struct rte_flow_item_geneve *geneve_v = item->spec;
        /* GENEVE flow item validation allows a single tunnel item. */
9402        void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9403        void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9404        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9405        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9406        uint16_t gbhdr_m;
9407        uint16_t gbhdr_v;
9408        char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9409        char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9410        size_t size = sizeof(geneve_m->vni), i;
9411        uint16_t protocol_m, protocol_v;
9412
9413        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9414                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9415                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9416                         MLX5_UDP_PORT_GENEVE);
9417        }
9418        if (!geneve_v) {
9419                geneve_v = &empty_geneve;
9420                geneve_m = &empty_geneve;
9421        } else {
9422                if (!geneve_m)
9423                        geneve_m = &rte_flow_item_geneve_mask;
9424        }
9425        memcpy(vni_m, geneve_m->vni, size);
9426        for (i = 0; i < size; ++i)
9427                vni_v[i] = vni_m[i] & geneve_v->vni[i];
9428        gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9429        gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9430        MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9431                 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9432        MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9433                 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9434        MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9435                 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9436        MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9437                 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9438                 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9439        protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
9440        protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
9441        if (!protocol_m) {
9442                /* Force next protocol to prevent matcher duplication. */
9443                protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
9444                if (protocol_v)
9445                        protocol_m = 0xFFFF;
9446        }
9447        MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
9448        MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9449                 protocol_m & protocol_v);
9450}
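
/*
 * Illustrative sketch (hypothetical values, not part of the driver):
 * a GENEVE item as an application could hand to the translator above;
 * the loop above programs vni_v[i] = vni_m[i] & geneve_v->vni[i].
 *
 *	struct rte_flow_item_geneve spec = {
 *		.vni = { 0x11, 0x22, 0x33 },
 *		.protocol = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_geneve mask = {
 *		.vni = { 0xff, 0xff, 0xff },
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
 *		.spec = &spec, .mask = &mask,
 *	};
 */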
9451
9452/**
9453 * Create Geneve TLV option resource.
9454 *
9455 * @param[in, out] dev
9456 *   Pointer to rte_eth_dev structure.
9457 * @param[in] item
9458 *   Flow pattern to translate; the GENEVE TLV option
9459 *   specification is taken from item->spec.
9460 * @param[out] error
9461 *   Pointer to error structure.
9462 *
9463 * @return
9464 *   0 on success, a negative errno value otherwise
9465 *   and the error structure is filled.
9466 */
9467
9468int
9469flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9470                                             const struct rte_flow_item *item,
9471                                             struct rte_flow_error *error)
9472{
9473        struct mlx5_priv *priv = dev->data->dev_private;
9474        struct mlx5_dev_ctx_shared *sh = priv->sh;
9475        struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9476                        sh->geneve_tlv_option_resource;
9477        struct mlx5_devx_obj *obj;
9478        const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9479        int ret = 0;
9480
9481        if (!geneve_opt_v)
9482                return -1;
9483        rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9484        if (geneve_opt_resource != NULL) {
9485                if (geneve_opt_resource->option_class ==
9486                        geneve_opt_v->option_class &&
9487                        geneve_opt_resource->option_type ==
9488                        geneve_opt_v->option_type &&
9489                        geneve_opt_resource->length ==
9490                        geneve_opt_v->option_len) {
9491                        /* We already have GENEVE TLV option obj allocated. */
9492                        __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9493                                           __ATOMIC_RELAXED);
9494                } else {
9495                        ret = rte_flow_error_set(error, ENOMEM,
9496                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9497                                "Only one GENEVE TLV option supported");
9498                        goto exit;
9499                }
9500        } else {
9501                /* Create a GENEVE TLV object and resource. */
9502                obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9503                                geneve_opt_v->option_class,
9504                                geneve_opt_v->option_type,
9505                                geneve_opt_v->option_len);
9506                if (!obj) {
9507                        ret = rte_flow_error_set(error, ENODATA,
9508                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9509                                "Failed to create GENEVE TLV Devx object");
9510                        goto exit;
9511                }
9512                sh->geneve_tlv_option_resource =
9513                                mlx5_malloc(MLX5_MEM_ZERO,
9514                                                sizeof(*geneve_opt_resource),
9515                                                0, SOCKET_ID_ANY);
9516                if (!sh->geneve_tlv_option_resource) {
9517                        claim_zero(mlx5_devx_cmd_destroy(obj));
9518                        ret = rte_flow_error_set(error, ENOMEM,
9519                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9520                                "GENEVE TLV object memory allocation failed");
9521                        goto exit;
9522                }
9523                geneve_opt_resource = sh->geneve_tlv_option_resource;
9524                geneve_opt_resource->obj = obj;
9525                geneve_opt_resource->option_class = geneve_opt_v->option_class;
9526                geneve_opt_resource->option_type = geneve_opt_v->option_type;
9527                geneve_opt_resource->length = geneve_opt_v->option_len;
9528                __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9529                                __ATOMIC_RELAXED);
9530        }
9531exit:
9532        rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9533        return ret;
9534}
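
/*
 * Usage sketch (hypothetical values): the shared context holds at
 * most one GENEVE TLV option object, so registering an identical
 * option (same class/type/length) only bumps the refcount, while a
 * different one fails until the first is released.
 *
 *	struct rte_flow_item_geneve_opt opt = {
 *		.option_class = RTE_BE16(0x0102),
 *		.option_type = 0x33,
 *		.option_len = 1,
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GENEVE_OPT,
 *		.spec = &opt,
 *	};
 *	ret = flow_dev_geneve_tlv_option_resource_register(dev, &item,
 *							    &error);
 */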
9535
9536/**
9537 * Add Geneve TLV option item to matcher.
9538 *
9539 * @param[in, out] dev
9540 *   Pointer to rte_eth_dev structure.
9541 * @param[in, out] matcher
9542 *   Flow matcher.
9543 * @param[in, out] key
9544 *   Flow matcher value.
9545 * @param[in] item
9546 *   Flow pattern to translate.
9547 * @param[out] error
9548 *   Pointer to error structure.
9549 */
9550static int
9551flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9552                                  void *key, const struct rte_flow_item *item,
9553                                  struct rte_flow_error *error)
9554{
9555        const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9556        const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9557        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9558        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9559        void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9560                        misc_parameters_3);
9561        void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9562        rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9563        int ret = 0;
9564
9565        if (!geneve_opt_v)
9566                return -1;
9567        if (!geneve_opt_m)
9568                geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9569        ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9570                                                           error);
9571        if (ret) {
9572                DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9573                return ret;
9574        }
9575        /*
9576         * Set the option length in GENEVE header if not requested.
9577         * The GENEVE TLV option length is expressed by the option length field
9578         * in the GENEVE header.
9579         * If the option length was not requested but the GENEVE TLV option item
9580         * is present, we set the option length field implicitly.
9581         */
9582        if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9583                MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9584                         MLX5_GENEVE_OPTLEN_MASK);
9585                MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9586                         geneve_opt_v->option_len + 1);
9587        }
9588        MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9589        MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9590        /* Set the data. */
9591        if (geneve_opt_v->data) {
9592                memcpy(&opt_data_key, geneve_opt_v->data,
9593                        RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9594                                sizeof(opt_data_key)));
9595                MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9596                                sizeof(opt_data_key));
9597                memcpy(&opt_data_mask, geneve_opt_m->data,
9598                        RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9599                                sizeof(opt_data_mask)));
9600                MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9601                                sizeof(opt_data_mask));
9602                MLX5_SET(fte_match_set_misc3, misc3_m,
9603                                geneve_tlv_option_0_data,
9604                                rte_be_to_cpu_32(opt_data_mask));
9605                MLX5_SET(fte_match_set_misc3, misc3_v,
9606                                geneve_tlv_option_0_data,
9607                        rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9608        }
9609        return ret;
9610}
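
/*
 * Worked example: option_len is counted in 4-byte words, so with
 * option_len == 1 the copies above move RTE_MIN(1 * 4, 4) == 4 bytes
 * into opt_data_key/opt_data_mask; only the first dword of option
 * data (geneve_tlv_option_0_data) can be matched in any case.
 */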
9611
9612/**
9613 * Add MPLS item to matcher and to the value.
9614 *
9615 * @param[in, out] matcher
9616 *   Flow matcher.
9617 * @param[in, out] key
9618 *   Flow matcher value.
9619 * @param[in] item
9620 *   Flow pattern to translate.
9621 * @param[in] prev_layer
9622 *   The protocol layer indicated in previous item.
9623 * @param[in] inner
9624 *   Item is inner pattern.
9625 */
9626static void
9627flow_dv_translate_item_mpls(void *matcher, void *key,
9628                            const struct rte_flow_item *item,
9629                            uint64_t prev_layer,
9630                            int inner)
9631{
9632        const uint32_t *in_mpls_m = item->mask;
9633        const uint32_t *in_mpls_v = item->spec;
9634        uint32_t *out_mpls_m = 0;
9635        uint32_t *out_mpls_v = 0;
9636        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9637        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9638        void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9639                                     misc_parameters_2);
9640        void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9641        void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9642        void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9643
9644        switch (prev_layer) {
9645        case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9646                if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9647                        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
9648                                 0xffff);
9649                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9650                                 MLX5_UDP_PORT_MPLS);
9651                }
9652                break;
9653        case MLX5_FLOW_LAYER_GRE:
9654                /* Fall-through. */
9655        case MLX5_FLOW_LAYER_GRE_KEY:
9656                if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
9657                        MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
9658                                 0xffff);
9659                        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9660                                 RTE_ETHER_TYPE_MPLS);
9661                }
9662                break;
9663        default:
9664                break;
9665        }
9666        if (!in_mpls_v)
9667                return;
9668        if (!in_mpls_m)
9669                in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9670        switch (prev_layer) {
9671        case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9672                out_mpls_m =
9673                        (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9674                                                 outer_first_mpls_over_udp);
9675                out_mpls_v =
9676                        (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9677                                                 outer_first_mpls_over_udp);
9678                break;
9679        case MLX5_FLOW_LAYER_GRE:
9680                out_mpls_m =
9681                        (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9682                                                 outer_first_mpls_over_gre);
9683                out_mpls_v =
9684                        (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9685                                                 outer_first_mpls_over_gre);
9686                break;
9687        default:
9688                /* Inner MPLS not over GRE is not supported. */
9689                if (!inner) {
9690                        out_mpls_m =
9691                                (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9692                                                         misc2_m,
9693                                                         outer_first_mpls);
9694                        out_mpls_v =
9695                                (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9696                                                         misc2_v,
9697                                                         outer_first_mpls);
9698                }
9699                break;
9700        }
9701        if (out_mpls_m && out_mpls_v) {
9702                *out_mpls_m = *in_mpls_m;
9703                *out_mpls_v = *in_mpls_v & *in_mpls_m;
9704        }
9705}
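
/*
 * Illustrative pattern (hypothetical): for eth / ipv4 / udp / mpls
 * the function is called with prev_layer == MLX5_FLOW_LAYER_OUTER_L4_UDP,
 * so an unspecified UDP destination port is implicitly forced to
 * MLX5_UDP_PORT_MPLS (6635) and the label is written to
 * outer_first_mpls_over_udp.
 */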
9706
9707/**
9708 * Add metadata register item to matcher
9709 *
9710 * @param[in, out] matcher
9711 *   Flow matcher.
9712 * @param[in, out] key
9713 *   Flow matcher value.
9714 * @param[in] reg_type
9715 *   Type of device metadata register.
9716 * @param[in] data
9717 *   Register value to match.
9718 * @param[in] mask
9719 *   Register mask.
9720 */
9721static void
9722flow_dv_match_meta_reg(void *matcher, void *key,
9723                       enum modify_reg reg_type,
9724                       uint32_t data, uint32_t mask)
9725{
9726        void *misc2_m =
9727                MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9728        void *misc2_v =
9729                MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9730        uint32_t temp;
9731
9732        data &= mask;
9733        switch (reg_type) {
9734        case REG_A:
9735                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9736                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9737                break;
9738        case REG_B:
9739                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9740                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9741                break;
9742        case REG_C_0:
9743                /*
9744                 * The metadata register C0 field might be divided into
9745                 * source vport index and META item value, we should set
9746                 * this field according to specified mask, not as whole one.
9747                 */
9748                temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9749                temp |= mask;
9750                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9751                temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9752                temp &= ~mask;
9753                temp |= data;
9754                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9755                break;
9756        case REG_C_1:
9757                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9758                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9759                break;
9760        case REG_C_2:
9761                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9762                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9763                break;
9764        case REG_C_3:
9765                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9766                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9767                break;
9768        case REG_C_4:
9769                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9770                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9771                break;
9772        case REG_C_5:
9773                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9774                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9775                break;
9776        case REG_C_6:
9777                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9778                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9779                break;
9780        case REG_C_7:
9781                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9782                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9783                break;
9784        default:
9785                MLX5_ASSERT(false);
9786                break;
9787        }
9788}
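
/*
 * Worked example for the REG_C_0 branch above (assumed values): if
 * the matcher already holds the vport tag in the upper half of C0
 * (mask 0xffff0000) and a new user arrives with mask 0x0000ffff and
 * data 0x1234, the read-modify-write yields:
 *
 *	matcher mask: 0xffff0000 | 0x0000ffff = 0xffffffff
 *	key value:    (old_value & ~0x0000ffff) | 0x00001234
 *
 * so both users of C0 coexist in one register.
 */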
9789
9790/**
9791 * Add MARK item to matcher
9792 *
9793 * @param[in] dev
9794 *   The device to configure through.
9795 * @param[in, out] matcher
9796 *   Flow matcher.
9797 * @param[in, out] key
9798 *   Flow matcher value.
9799 * @param[in] item
9800 *   Flow pattern to translate.
9801 */
9802static void
9803flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9804                            void *matcher, void *key,
9805                            const struct rte_flow_item *item)
9806{
9807        struct mlx5_priv *priv = dev->data->dev_private;
9808        const struct rte_flow_item_mark *mark;
9809        uint32_t value;
9810        uint32_t mask;
9811
9812        mark = item->mask ? (const void *)item->mask :
9813                            &rte_flow_item_mark_mask;
9814        mask = mark->id & priv->sh->dv_mark_mask;
9815        mark = (const void *)item->spec;
9816        MLX5_ASSERT(mark);
9817        value = mark->id & priv->sh->dv_mark_mask & mask;
9818        if (mask) {
9819                enum modify_reg reg;
9820
9821                /* Get the metadata register index for the mark. */
9822                reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9823                MLX5_ASSERT(reg > 0);
9824                if (reg == REG_C_0) {
9825                        struct mlx5_priv *priv = dev->data->dev_private;
9826                        uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9827                        uint32_t shl_c0 = rte_bsf32(msk_c0);
9828
9829                        mask &= msk_c0;
9830                        mask <<= shl_c0;
9831                        value <<= shl_c0;
9832                }
9833                flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9834        }
9835}
9836
9837/**
9838 * Add META item to matcher
9839 *
9840 * @param[in] dev
9841 *   The device to configure through.
9842 * @param[in, out] matcher
9843 *   Flow matcher.
9844 * @param[in, out] key
9845 *   Flow matcher value.
9846 * @param[in] attr
9847 *   Attributes of flow that includes this item.
9848 * @param[in] item
9849 *   Flow pattern to translate.
9850 */
9851static void
9852flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9853                            void *matcher, void *key,
9854                            const struct rte_flow_attr *attr,
9855                            const struct rte_flow_item *item)
9856{
9857        const struct rte_flow_item_meta *meta_m;
9858        const struct rte_flow_item_meta *meta_v;
9859
9860        meta_m = (const void *)item->mask;
9861        if (!meta_m)
9862                meta_m = &rte_flow_item_meta_mask;
9863        meta_v = (const void *)item->spec;
9864        if (meta_v) {
9865                int reg;
9866                uint32_t value = meta_v->data;
9867                uint32_t mask = meta_m->data;
9868
9869                reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9870                if (reg < 0)
9871                        return;
9872                MLX5_ASSERT(reg != REG_NON);
9873                if (reg == REG_C_0) {
9874                        struct mlx5_priv *priv = dev->data->dev_private;
9875                        uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9876                        uint32_t shl_c0 = rte_bsf32(msk_c0);
9877
9878                        mask &= msk_c0;
9879                        mask <<= shl_c0;
9880                        value <<= shl_c0;
9881                }
9882                flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9883        }
9884}
9885
9886/**
9887 * Add vport metadata Reg C0 item to matcher
9888 *
9889 * @param[in, out] matcher
9890 *   Flow matcher.
9891 * @param[in, out] key
9892 *   Flow matcher value.
9893 * @param[in] value
9894 *   Register value to match.
 * @param[in] mask
 *   Register mask.
9895 */
9896static void
9897flow_dv_translate_item_meta_vport(void *matcher, void *key,
9898                                  uint32_t value, uint32_t mask)
9899{
9900        flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9901}
9902
9903/**
9904 * Add tag item to matcher
9905 *
9906 * @param[in] dev
9907 *   The device to configure through.
9908 * @param[in, out] matcher
9909 *   Flow matcher.
9910 * @param[in, out] key
9911 *   Flow matcher value.
9912 * @param[in] item
9913 *   Flow pattern to translate.
9914 */
9915static void
9916flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9917                                void *matcher, void *key,
9918                                const struct rte_flow_item *item)
9919{
9920        const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9921        const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9922        uint32_t mask, value;
9923
9924        MLX5_ASSERT(tag_v);
9925        value = tag_v->data;
9926        mask = tag_m ? tag_m->data : UINT32_MAX;
9927        if (tag_v->id == REG_C_0) {
9928                struct mlx5_priv *priv = dev->data->dev_private;
9929                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9930                uint32_t shl_c0 = rte_bsf32(msk_c0);
9931
9932                mask &= msk_c0;
9933                mask <<= shl_c0;
9934                value <<= shl_c0;
9935        }
9936        flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9937}
9938
9939/**
9940 * Add TAG item to matcher
9941 *
9942 * @param[in] dev
9943 *   The device to configure through.
9944 * @param[in, out] matcher
9945 *   Flow matcher.
9946 * @param[in, out] key
9947 *   Flow matcher value.
9948 * @param[in] item
9949 *   Flow pattern to translate.
9950 */
9951static void
9952flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9953                           void *matcher, void *key,
9954                           const struct rte_flow_item *item)
9955{
9956        const struct rte_flow_item_tag *tag_v = item->spec;
9957        const struct rte_flow_item_tag *tag_m = item->mask;
9958        enum modify_reg reg;
9959
9960        MLX5_ASSERT(tag_v);
9961        tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9962        /* Get the metadata register index for the tag. */
9963        reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9964        MLX5_ASSERT(reg > 0);
9965        flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9966}
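
/*
 * Usage sketch (hypothetical values): the TAG index is resolved to a
 * physical REG_C_x via mlx5_flow_get_reg_id(), so the item below
 * matches tag array entry 3 against 0xcafe with the default full mask.
 *
 *	struct rte_flow_item_tag spec = { .data = 0xcafe, .index = 3 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_TAG,
 *		.spec = &spec,
 *	};
 */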
9967
9968/**
9969 * Add source vport match to the specified matcher.
9970 *
9971 * @param[in, out] matcher
9972 *   Flow matcher.
9973 * @param[in, out] key
9974 *   Flow matcher value.
9975 * @param[in] port
9976 *   Source vport value to match.
9977 * @param[in] mask
9978 *   Mask to apply.
9979 */
9980static void
9981flow_dv_translate_item_source_vport(void *matcher, void *key,
9982                                    int16_t port, uint16_t mask)
9983{
9984        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9985        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9986
9987        MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9988        MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9989}
9990
9991/**
9992 * Translate port-id item to eswitch match on port-id.
9993 *
9994 * @param[in] dev
9995 *   The device to configure through.
9996 * @param[in, out] matcher
9997 *   Flow matcher.
9998 * @param[in, out] key
9999 *   Flow matcher value.
10000 * @param[in] item
10001 *   Flow pattern to translate.
10002 * @param[in] attr
10003 *   Flow attributes.
10004 *
10005 * @return
10006 *   0 on success, a negative errno value otherwise.
10007 */
10008static int
10009flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
10010                               void *key, const struct rte_flow_item *item,
10011                               const struct rte_flow_attr *attr)
10012{
10013        const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
10014        const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
10015        struct mlx5_priv *priv;
10016        uint16_t mask, id;
10017
10018        if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
10019                flow_dv_translate_item_source_vport(matcher, key,
10020                        mlx5_flow_get_esw_manager_vport_id(dev), 0xffff);
10021                return 0;
10022        }
10023        mask = pid_m ? pid_m->id : 0xffff;
10024        id = pid_v ? pid_v->id : dev->data->port_id;
10025        priv = mlx5_port_to_eswitch_info(id, item == NULL);
10026        if (!priv)
10027                return -rte_errno;
10028        /*
10029         * Translate to vport field or to metadata, depending on mode.
10030         * Kernel can use either misc.source_port or half of C0 metadata
10031         * register.
10032         */
10033        if (priv->vport_meta_mask) {
10034                /*
10035                 * Provide the hint for SW steering library
10036                 * to insert the flow into ingress domain and
10037                 * save the extra vport match.
10038                 */
10039                if (mask == 0xffff && priv->vport_id == 0xffff &&
10040                    priv->pf_bond < 0 && attr->transfer)
10041                        flow_dv_translate_item_source_vport
10042                                (matcher, key, priv->vport_id, mask);
10043                /*
10044                 * We should always set the vport metadata register,
10045                 * otherwise the SW steering library can drop
10046                 * the rule if wire vport metadata value is not zero,
10047                 * it depends on kernel configuration.
10048                 */
10049                flow_dv_translate_item_meta_vport(matcher, key,
10050                                                  priv->vport_meta_tag,
10051                                                  priv->vport_meta_mask);
10052        } else {
10053                flow_dv_translate_item_source_vport(matcher, key,
10054                                                    priv->vport_id, mask);
10055        }
10056        return 0;
10057}
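
/*
 * Usage sketch (hypothetical): matching traffic coming from DPDK
 * port 1 in a transfer rule; the translator above resolves the port
 * to its E-Switch vport or to its REG_C_0 vport metadata tag.
 *
 *	struct rte_flow_item_port_id spec = { .id = 1 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *		.spec = &spec,
 *	};
 */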
10058
10059/**
10060 * Translate represented port item to eswitch match on port id.
10061 *
10062 * @param[in] dev
10063 *   The device to configure through.
10064 * @param[in, out] matcher
10065 *   Flow matcher.
10066 * @param[in, out] key
10067 *   Flow matcher value.
10068 * @param[in] item
10069 *   Flow pattern to translate.
10070 * @param[in] attr
10071 *   Flow attributes.
10072 *
10073 * @return
10074 *   0 on success, a negative errno value otherwise.
10075 */
10076static int
10077flow_dv_translate_item_represented_port(struct rte_eth_dev *dev, void *matcher,
10078                                        void *key,
10079                                        const struct rte_flow_item *item,
10080                                        const struct rte_flow_attr *attr)
10081{
10082        const struct rte_flow_item_ethdev *pid_m = item ? item->mask : NULL;
10083        const struct rte_flow_item_ethdev *pid_v = item ? item->spec : NULL;
10084        struct mlx5_priv *priv;
10085        uint16_t mask, id;
10086
10087        if (!pid_m && !pid_v)
10088                return 0;
10089        if (pid_v && pid_v->port_id == UINT16_MAX) {
10090                flow_dv_translate_item_source_vport(matcher, key,
10091                        mlx5_flow_get_esw_manager_vport_id(dev), UINT16_MAX);
10092                return 0;
10093        }
10094        mask = pid_m ? pid_m->port_id : UINT16_MAX;
10095        id = pid_v ? pid_v->port_id : dev->data->port_id;
10096        priv = mlx5_port_to_eswitch_info(id, item == NULL);
10097        if (!priv)
10098                return -rte_errno;
10099        /*
10100         * Translate to vport field or to metadata, depending on mode.
10101         * Kernel can use either misc.source_port or half of C0 metadata
10102         * register.
10103         */
10104        if (priv->vport_meta_mask) {
10105                /*
10106                 * Provide the hint for SW steering library
10107                 * to insert the flow into ingress domain and
10108                 * save the extra vport match.
10109                 */
10110                if (mask == UINT16_MAX && priv->vport_id == UINT16_MAX &&
10111                    priv->pf_bond < 0 && attr->transfer)
10112                        flow_dv_translate_item_source_vport
10113                                (matcher, key, priv->vport_id, mask);
10114                /*
10115                 * We should always set the vport metadata register,
10116                 * otherwise the SW steering library can drop
10117                 * the rule if wire vport metadata value is not zero,
10118                 * it depends on kernel configuration.
10119                 */
10120                flow_dv_translate_item_meta_vport(matcher, key,
10121                                                  priv->vport_meta_tag,
10122                                                  priv->vport_meta_mask);
10123        } else {
10124                flow_dv_translate_item_source_vport(matcher, key,
10125                                                    priv->vport_id, mask);
10126        }
10127        return 0;
10128}
10129
10130/**
10131 * Add ICMP6 item to matcher and to the value.
10132 *
10133 * @param[in, out] matcher
10134 *   Flow matcher.
10135 * @param[in, out] key
10136 *   Flow matcher value.
10137 * @param[in] item
10138 *   Flow pattern to translate.
10139 * @param[in] inner
10140 *   Item is inner pattern.
10141 */
10142static void
10143flow_dv_translate_item_icmp6(void *matcher, void *key,
10144                              const struct rte_flow_item *item,
10145                              int inner)
10146{
10147        const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
10148        const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
10149        void *headers_m;
10150        void *headers_v;
10151        void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
10152                                     misc_parameters_3);
10153        void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
10154        if (inner) {
10155                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10156                                         inner_headers);
10157                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
10158        } else {
10159                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10160                                         outer_headers);
10161                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10162        }
10163        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
10164        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
10165        if (!icmp6_v)
10166                return;
10167        if (!icmp6_m)
10168                icmp6_m = &rte_flow_item_icmp6_mask;
10169        MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
10170        MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
10171                 icmp6_v->type & icmp6_m->type);
10172        MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
10173        MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
10174                 icmp6_v->code & icmp6_m->code);
10175}
10176
10177/**
10178 * Add ICMP item to matcher and to the value.
10179 *
10180 * @param[in, out] matcher
10181 *   Flow matcher.
10182 * @param[in, out] key
10183 *   Flow matcher value.
10184 * @param[in] item
10185 *   Flow pattern to translate.
10186 * @param[in] inner
10187 *   Item is inner pattern.
10188 */
10189static void
10190flow_dv_translate_item_icmp(void *matcher, void *key,
10191                            const struct rte_flow_item *item,
10192                            int inner)
10193{
10194        const struct rte_flow_item_icmp *icmp_m = item->mask;
10195        const struct rte_flow_item_icmp *icmp_v = item->spec;
10196        uint32_t icmp_header_data_m = 0;
10197        uint32_t icmp_header_data_v = 0;
10198        void *headers_m;
10199        void *headers_v;
10200        void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
10201                                     misc_parameters_3);
10202        void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
10203        if (inner) {
10204                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10205                                         inner_headers);
10206                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
10207        } else {
10208                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10209                                         outer_headers);
10210                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10211        }
10212        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
10213        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
10214        if (!icmp_v)
10215                return;
10216        if (!icmp_m)
10217                icmp_m = &rte_flow_item_icmp_mask;
10218        MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
10219                 icmp_m->hdr.icmp_type);
10220        MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
10221                 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
10222        MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
10223                 icmp_m->hdr.icmp_code);
10224        MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
10225                 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
10226        icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
10227        icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
10228        if (icmp_header_data_m) {
10229                icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
10230                icmp_header_data_v |=
10231                         rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
10232                MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
10233                         icmp_header_data_m);
10234                MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
10235                         icmp_header_data_v & icmp_header_data_m);
10236        }
10237}
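
/*
 * Worked example: identifier and sequence number are folded into the
 * single 32-bit icmp_header_data field as (ident << 16) | seq_nb in
 * CPU order, e.g. ident 0x1234 and seq_nb 0x0001 give 0x12340001.
 */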
10238
10239/**
10240 * Add GTP item to matcher and to the value.
10241 *
10242 * @param[in, out] matcher
10243 *   Flow matcher.
10244 * @param[in, out] key
10245 *   Flow matcher value.
10246 * @param[in] item
10247 *   Flow pattern to translate.
10248 * @param[in] inner
10249 *   Item is inner pattern.
10250 */
10251static void
10252flow_dv_translate_item_gtp(void *matcher, void *key,
10253                           const struct rte_flow_item *item, int inner)
10254{
10255        const struct rte_flow_item_gtp *gtp_m = item->mask;
10256        const struct rte_flow_item_gtp *gtp_v = item->spec;
10257        void *headers_m;
10258        void *headers_v;
10259        void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
10260                                     misc_parameters_3);
10261        void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
10262        uint16_t dport = RTE_GTPU_UDP_PORT;
10263
10264        if (inner) {
10265                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10266                                         inner_headers);
10267                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
10268        } else {
10269                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10270                                         outer_headers);
10271                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10272        }
10273        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
10274                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
10275                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
10276        }
10277        if (!gtp_v)
10278                return;
10279        if (!gtp_m)
10280                gtp_m = &rte_flow_item_gtp_mask;
10281        MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
10282                 gtp_m->v_pt_rsv_flags);
10283        MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
10284                 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
10285        MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
10286        MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
10287                 gtp_v->msg_type & gtp_m->msg_type);
10288        MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
10289                 rte_be_to_cpu_32(gtp_m->teid));
10290        MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
10291                 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
10292}
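
/*
 * Usage sketch (hypothetical TEID): with no explicit UDP item the
 * destination port is implicitly forced to RTE_GTPU_UDP_PORT above.
 *
 *	struct rte_flow_item_gtp spec = { .teid = RTE_BE32(0x1234) };
 *	struct rte_flow_item_gtp mask = { .teid = RTE_BE32(UINT32_MAX) };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GTP,
 *		.spec = &spec, .mask = &mask,
 *	};
 */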
10293
10294/**
10295 * Add GTP PSC item to matcher.
10296 *
10297 * @param[in, out] matcher
10298 *   Flow matcher.
10299 * @param[in, out] key
10300 *   Flow matcher value.
10301 * @param[in] item
10302 *   Flow pattern to translate.
10303 */
10304static int
10305flow_dv_translate_item_gtp_psc(void *matcher, void *key,
10306                               const struct rte_flow_item *item)
10307{
10308        const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
10309        const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
10310        void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
10311                        misc_parameters_3);
10312        void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
10313        union {
10314                uint32_t w32;
10315                struct {
10316                        uint16_t seq_num;
10317                        uint8_t npdu_num;
10318                        uint8_t next_ext_header_type;
10319                };
10320        } dw_2;
10321        uint8_t gtp_flags;
10322
10323        /* Always set E-flag match on one, regardless of GTP item settings. */
10324        gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
10325        gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
10326        MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
10327        gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
10328        gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
10329        MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
10330        /* Set the next extension header type (0x85 = PDU Session Container). */
10331        dw_2.seq_num = 0;
10332        dw_2.npdu_num = 0;
10333        dw_2.next_ext_header_type = 0xff;
10334        MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
10335                 rte_cpu_to_be_32(dw_2.w32));
10336        dw_2.seq_num = 0;
10337        dw_2.npdu_num = 0;
10338        dw_2.next_ext_header_type = 0x85;
10339        MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
10340                 rte_cpu_to_be_32(dw_2.w32));
10341        if (gtp_psc_v) {
10342                union {
10343                        uint32_t w32;
10344                        struct {
10345                                uint8_t len;
10346                                uint8_t type_flags;
10347                                uint8_t qfi;
10348                                uint8_t reserved;
10349                        };
10350                } dw_0;
10351
10352                /* Set extension header PDU type and QoS. */
10353                if (!gtp_psc_m)
10354                        gtp_psc_m = &rte_flow_item_gtp_psc_mask;
10355                dw_0.w32 = 0;
10356                dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
10357                dw_0.qfi = gtp_psc_m->hdr.qfi;
10358                MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
10359                         rte_cpu_to_be_32(dw_0.w32));
10360                dw_0.w32 = 0;
10361                dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
10362                                                        gtp_psc_m->hdr.type);
10363                dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
10364                MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
10365                         rte_cpu_to_be_32(dw_0.w32));
10366        }
10367        return 0;
10368}
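
/*
 * Usage sketch (hypothetical QFI): the function above always pins the
 * GTP E-flag and next extension header type 0x85; the item itself
 * only narrows the PDU type and QFI.
 *
 *	struct rte_flow_item_gtp_psc spec = { .hdr = { .qfi = 9 } };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
 *		.spec = &spec,
 *	};
 */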
10369
10370/**
10371 * Add eCPRI item to matcher and to the value.
10372 *
10373 * @param[in] dev
10374 *   The device to configure through.
10375 * @param[in, out] matcher
10376 *   Flow matcher.
10377 * @param[in, out] key
10378 *   Flow matcher value.
10379 * @param[in] item
10380 *   Flow pattern to translate.
10381 * @param[in] last_item
10382 *   Last item flags.
10383 */
10384static void
10385flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
10386                             void *key, const struct rte_flow_item *item,
10387                             uint64_t last_item)
10388{
10389        struct mlx5_priv *priv = dev->data->dev_private;
10390        const struct rte_flow_item_ecpri *ecpri_m = item->mask;
10391        const struct rte_flow_item_ecpri *ecpri_v = item->spec;
10392        struct rte_ecpri_common_hdr common;
10393        void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
10394                                     misc_parameters_4);
10395        void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
10396        uint32_t *samples;
10397        void *dw_m;
10398        void *dw_v;
10399
10400        /*
10401         * In case of eCPRI over Ethernet, if EtherType is not specified,
10402         * match on eCPRI EtherType implicitly.
10403         */
10404        if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
10405                void *hdrs_m, *hdrs_v, *l2m, *l2v;
10406
10407                hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
10408                hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10409                l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
10410                l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
10411                if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
10412                        *(uint16_t *)l2m = UINT16_MAX;
10413                        *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
10414                }
10415        }
10416        if (!ecpri_v)
10417                return;
10418        if (!ecpri_m)
10419                ecpri_m = &rte_flow_item_ecpri_mask;
10420        /*
10421         * At most four DW samples are supported in a single matcher now.
10422         * Two are used now for eCPRI matching:
10423         * 1. Type: one byte, mask should be 0x00ff0000 in network order
10424         * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
10425         *    if any.
10426         */
10427        if (!ecpri_m->hdr.common.u32)
10428                return;
10429        samples = priv->sh->ecpri_parser.ids;
10430        /* Need to take the whole DW as the mask to fill the entry. */
10431        dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10432                            prog_sample_field_value_0);
10433        dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10434                            prog_sample_field_value_0);
10435        /* Already big endian (network order) in the header. */
10436        *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
10437        *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10438        /* Sample#0, used for matching type, offset 0. */
10439        MLX5_SET(fte_match_set_misc4, misc4_m,
10440                 prog_sample_field_id_0, samples[0]);
10441        /* It makes no sense to set the sample ID in the mask field. */
10442        MLX5_SET(fte_match_set_misc4, misc4_v,
10443                 prog_sample_field_id_0, samples[0]);
10444        /*
10445         * Check whether the message body part needs to be matched.
10446         * Wildcard rules matching only the type field should be supported.
10447         */
10448        if (ecpri_m->hdr.dummy[0]) {
10449                common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10450                switch (common.type) {
10451                case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10452                case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10453                case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10454                        dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10455                                            prog_sample_field_value_1);
10456                        dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10457                                            prog_sample_field_value_1);
10458                        *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10459                        *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10460                                            ecpri_m->hdr.dummy[0];
10461                        /* Sample#1, to match message body, offset 4. */
10462                        MLX5_SET(fte_match_set_misc4, misc4_m,
10463                                 prog_sample_field_id_1, samples[1]);
10464                        MLX5_SET(fte_match_set_misc4, misc4_v,
10465                                 prog_sample_field_id_1, samples[1]);
10466                        break;
10467                default:
10468                        /* Others, do not match any sample ID. */
10469                        break;
10470                }
10471        }
10472}
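
/*
 * Usage sketch (assumed flex parser setup): the one-byte message type
 * lives in hdr.common (sample #0, mask 0x00ff0000 in network order)
 * and hdr.dummy[0] carries the first message-body dword (sample #1).
 *
 *	struct rte_flow_item_ecpri spec;
 *
 *	memset(&spec, 0, sizeof(spec));
 *	spec.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
 *	spec.hdr.dummy[0] = rte_cpu_to_be_32(0x00050000);
 *
 * i.e. PC ID 5, sequence ID 0.
 */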
10473
10474/**
10475 * Add connection tracking status item to matcher
10476 *
10477 * @param[in] dev
10478 *   The device to configure through.
10479 * @param[in, out] matcher
10480 *   Flow matcher.
10481 * @param[in, out] key
10482 *   Flow matcher value.
10483 * @param[in] item
10484 *   Flow pattern to translate.
10485 */
10486static void
10487flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10488                              void *matcher, void *key,
10489                              const struct rte_flow_item *item)
10490{
10491        uint32_t reg_value = 0;
10492        int reg_id;
10493        /* 8 LSBs are 0b11/0000/11: bits 7:6 and 1:0 are used, middle 4 bits are reserved. */
10494        uint32_t reg_mask = 0;
10495        const struct rte_flow_item_conntrack *spec = item->spec;
10496        const struct rte_flow_item_conntrack *mask = item->mask;
10497        uint32_t flags;
10498        struct rte_flow_error error;
10499
10500        if (!mask)
10501                mask = &rte_flow_item_conntrack_mask;
10502        if (!spec || !mask->flags)
10503                return;
10504        flags = spec->flags & mask->flags;
10505        /* The conflict should be checked in the validation. */
10506        if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10507                reg_value |= MLX5_CT_SYNDROME_VALID;
10508        if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10509                reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10510        if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10511                reg_value |= MLX5_CT_SYNDROME_INVALID;
10512        if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10513                reg_value |= MLX5_CT_SYNDROME_TRAP;
10514        if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10515                reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10516        if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10517                           RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10518                           RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10519                reg_mask |= 0xc0;
10520        if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10521                reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10522        if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10523                reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10524        /* The REG_C_x value could be saved during startup. */
10525        reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10526        if (reg_id == REG_NON)
10527                return;
10528        flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10529                               reg_value, reg_mask);
10530}
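
/*
 * Worked example: spec/mask flags VALID | BAD yield reg_value ==
 * MLX5_CT_SYNDROME_VALID | MLX5_CT_SYNDROME_BAD_PACKET and reg_mask
 * == 0xc0 (state bits 7:6) plus the bad-packet bit, matched on the
 * REG_C_x assigned to MLX5_ASO_CONNTRACK.
 */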
10531
10532static void
10533flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
10534                            const struct rte_flow_item *item,
10535                            struct mlx5_flow *dev_flow, bool is_inner)
10536{
10537        const struct rte_flow_item_flex *spec =
10538                (const struct rte_flow_item_flex *)item->spec;
10539        int index = mlx5_flex_acquire_index(dev, spec->handle, false);
10540
10541        MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
10542        if (index < 0)
10543                return;
10544        if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
10545                /* Don't count both inner and outer flex items in one rule. */
10546                if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
10547                        MLX5_ASSERT(false);
10548                dev_flow->handle->flex_item |= (uint8_t)RTE_BIT32(index);
10549        }
10550        mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
10551}
10552
10553static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10554
10555#define HEADER_IS_ZERO(match_criteria, headers)                              \
10556        !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10557                 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10558
10559/**
10560 * Calculate flow matcher enable bitmap.
10561 *
10562 * @param match_criteria
10563 *   Pointer to flow matcher criteria.
10564 *
10565 * @return
10566 *   Bitmap of enabled fields.
10567 */
10568static uint8_t
10569flow_dv_matcher_enable(uint32_t *match_criteria)
10570{
10571        uint8_t match_criteria_enable;
10572
10573        match_criteria_enable =
10574                (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10575                MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10576        match_criteria_enable |=
10577                (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10578                MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10579        match_criteria_enable |=
10580                (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10581                MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10582        match_criteria_enable |=
10583                (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10584                MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10585        match_criteria_enable |=
10586                (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10587                MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10588        match_criteria_enable |=
10589                (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10590                MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10591        match_criteria_enable |=
10592                (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10593                MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10594        return match_criteria_enable;
10595}
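
/*
 * Worked example: a matcher populating only outer_headers and
 * misc_parameters_2 yields
 *	(1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *	(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT)
 * i.e. 0x9 with the PRM bit layout (outer = 0, misc2 = 3).
 */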
10596
10597static void
10598__flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10599{
10600        /*
10601         * Check flow matching criteria first, subtract misc5/4 length if flow
10602         * doesn't own misc5/4 parameters. In some old rdma-core releases,
10603         * misc5/4 are not supported, and matcher creation failure is expected
10604         * w/o subtraction. If misc5 is provided, misc4 must be counted in since
10605         * misc5 is right after misc4.
10606         */
10607        if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10608                *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10609                        MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10610                if (!(match_criteria & (1 <<
10611                        MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10612                        *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10613                }
10614        }
10615}
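
/*
 * Worked example: with neither MISC5 nor MISC4 enabled the buffer
 * shrinks to
 *	MLX5_ST_SZ_BYTES(fte_match_param) -
 *	MLX5_ST_SZ_BYTES(fte_match_set_misc5) -
 *	MLX5_ST_SZ_BYTES(fte_match_set_misc4)
 * which old rdma-core releases accept; once MISC5 is enabled the full
 * size is kept because misc4 precedes misc5 in the layout.
 */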
10616
10617static struct mlx5_list_entry *
10618flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10619                         struct mlx5_list_entry *entry, void *cb_ctx)
10620{
10621        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10622        struct mlx5_flow_dv_matcher *ref = ctx->data;
10623        struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10624                                                            typeof(*tbl), tbl);
10625        struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10626                                                            sizeof(*resource),
10627                                                            0, SOCKET_ID_ANY);
10628
10629        if (!resource) {
10630                rte_flow_error_set(ctx->error, ENOMEM,
10631                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10632                                   "cannot create matcher");
10633                return NULL;
10634        }
10635        memcpy(resource, entry, sizeof(*resource));
10636        resource->tbl = &tbl->tbl;
10637        return &resource->entry;
10638}
10639
10640static void
10641flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10642                             struct mlx5_list_entry *entry)
10643{
10644        mlx5_free(entry);
10645}
10646
10647struct mlx5_list_entry *
10648flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10649{
10650        struct mlx5_dev_ctx_shared *sh = tool_ctx;
10651        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10652        struct rte_eth_dev *dev = ctx->dev;
10653        struct mlx5_flow_tbl_data_entry *tbl_data;
10654        struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10655        struct rte_flow_error *error = ctx->error;
10656        union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10657        struct mlx5_flow_tbl_resource *tbl;
10658        void *domain;
10659        uint32_t idx = 0;
10660        int ret;
10661
10662        tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10663        if (!tbl_data) {
10664                rte_flow_error_set(error, ENOMEM,
10665                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10666                                   NULL,
10667                                   "cannot allocate flow table data entry");
10668                return NULL;
10669        }
10670        tbl_data->idx = idx;
10671        tbl_data->tunnel = tt_prm->tunnel;
10672        tbl_data->group_id = tt_prm->group_id;
10673        tbl_data->external = !!tt_prm->external;
10674        tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10675        tbl_data->is_egress = !!key.is_egress;
10676        tbl_data->is_transfer = !!key.is_fdb;
10677        tbl_data->dummy = !!key.dummy;
10678        tbl_data->level = key.level;
10679        tbl_data->id = key.id;
10680        tbl = &tbl_data->tbl;
10681        if (key.dummy)
10682                return &tbl_data->entry;
10683        if (key.is_fdb)
10684                domain = sh->fdb_domain;
10685        else if (key.is_egress)
10686                domain = sh->tx_domain;
10687        else
10688                domain = sh->rx_domain;
10689        ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10690        if (ret) {
10691                rte_flow_error_set(error, ENOMEM,
10692                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10693                                   NULL, "cannot create flow table object");
10694                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10695                return NULL;
10696        }
10697        if (key.level != 0) {
10698                ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10699                                        (tbl->obj, &tbl_data->jump.action);
10700                if (ret) {
10701                        rte_flow_error_set(error, ENOMEM,
10702                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10703                                           NULL,
10704                                           "cannot create flow jump action");
10705                        mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10706                        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10707                        return NULL;
10708                }
10709        }
10710        MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10711              key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10712              key.level, key.id);
10713        tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10714                                              flow_dv_matcher_create_cb,
10715                                              flow_dv_matcher_match_cb,
10716                                              flow_dv_matcher_remove_cb,
10717                                              flow_dv_matcher_clone_cb,
10718                                              flow_dv_matcher_clone_free_cb);
10719        if (!tbl_data->matchers) {
10720                rte_flow_error_set(error, ENOMEM,
10721                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10722                                   NULL,
10723                                   "cannot create tbl matcher list");
10724                mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10725                mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10726                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10727                return NULL;
10728        }
10729        return &tbl_data->entry;
10730}
10731
10732int
10733flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10734                     void *cb_ctx)
10735{
10736        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10737        struct mlx5_flow_tbl_data_entry *tbl_data =
10738                container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10739        union mlx5_flow_tbl_key key = { .v64 =  *(uint64_t *)(ctx->data) };
10740
10741        return tbl_data->level != key.level ||
10742               tbl_data->id != key.id ||
10743               tbl_data->dummy != key.dummy ||
10744               tbl_data->is_transfer != !!key.is_fdb ||
10745               tbl_data->is_egress != !!key.is_egress;
10746}
10747
10748struct mlx5_list_entry *
10749flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10750                      void *cb_ctx)
10751{
10752        struct mlx5_dev_ctx_shared *sh = tool_ctx;
10753        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10754        struct mlx5_flow_tbl_data_entry *tbl_data;
10755        struct rte_flow_error *error = ctx->error;
10756        uint32_t idx = 0;
10757
10758        tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10759        if (!tbl_data) {
10760                rte_flow_error_set(error, ENOMEM,
10761                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10762                                   NULL,
10763                                   "cannot allocate flow table data entry");
10764                return NULL;
10765        }
10766        memcpy(tbl_data, oentry, sizeof(*tbl_data));
10767        tbl_data->idx = idx;
10768        return &tbl_data->entry;
10769}
10770
10771void
10772flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10773{
10774        struct mlx5_dev_ctx_shared *sh = tool_ctx;
10775        struct mlx5_flow_tbl_data_entry *tbl_data =
10776                    container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10777
10778        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10779}
10780
10781/**
10782 * Get a flow table.
10783 *
10784 * @param[in, out] dev
10785 *   Pointer to rte_eth_dev structure.
10786 * @param[in] table_level
10787 *   Table level to use.
10788 * @param[in] egress
10789 *   Direction of the table.
10790 * @param[in] transfer
10791 *   E-Switch or NIC flow.
 * @param[in] external
 *   Whether the table is created for an application flow.
 * @param[in] tunnel
 *   Tunnel offload context, NULL if not applicable.
 * @param[in] group_id
 *   Group ID the table belongs to, used by tunnel offload.
10792 * @param[in] dummy
10793 *   Dummy entry for DV API.
10794 * @param[in] table_id
10795 *   Table ID to use.
10796 * @param[out] error
10797 *   Pointer to error structure.
10798 *
10799 * @return
10800 *   Pointer to the table resource on success, NULL in case of failure.
10801 */
10802struct mlx5_flow_tbl_resource *
10803flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10804                         uint32_t table_level, uint8_t egress,
10805                         uint8_t transfer,
10806                         bool external,
10807                         const struct mlx5_flow_tunnel *tunnel,
10808                         uint32_t group_id, uint8_t dummy,
10809                         uint32_t table_id,
10810                         struct rte_flow_error *error)
10811{
10812        struct mlx5_priv *priv = dev->data->dev_private;
10813        union mlx5_flow_tbl_key table_key = {
10814                {
10815                        .level = table_level,
10816                        .id = table_id,
10817                        .reserved = 0,
10818                        .dummy = !!dummy,
10819                        .is_fdb = !!transfer,
10820                        .is_egress = !!egress,
10821                }
10822        };
10823        struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10824                .tunnel = tunnel,
10825                .group_id = group_id,
10826                .external = external,
10827        };
10828        struct mlx5_flow_cb_ctx ctx = {
10829                .dev = dev,
10830                .error = error,
10831                .data = &table_key.v64,
10832                .data2 = &tt_prm,
10833        };
10834        struct mlx5_list_entry *entry;
10835        struct mlx5_flow_tbl_data_entry *tbl_data;
10836
10837        entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10838        if (!entry) {
10839                rte_flow_error_set(error, ENOMEM,
10840                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10841                                   "cannot get table");
10842                return NULL;
10843        }
10844        DRV_LOG(DEBUG, "table_level %u table_id %u "
10845                "tunnel %u group %u registered.",
10846                table_level, table_id,
10847                tunnel ? tunnel->tunnel_id : 0, group_id);
10848        tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10849        return &tbl_data->tbl;
10850}
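
/*
 * Illustrative sketch, not part of the driver: a minimal get/release
 * pairing for a NIC Rx table at level 1, assuming "dev" and "error"
 * come from the caller's context. Release goes through
 * flow_dv_tbl_resource_release() below.
 *
 *    struct mlx5_flow_tbl_resource *tbl;
 *
 *    tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
 *                                   0, 0, 0, error);
 *    if (!tbl)
 *            return -rte_errno;
 *    ...
 *    flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
 */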
10851
10852void
10853flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10854{
10855        struct mlx5_dev_ctx_shared *sh = tool_ctx;
10856        struct mlx5_flow_tbl_data_entry *tbl_data =
10857                    container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10858
10859        MLX5_ASSERT(entry && sh);
10860        if (tbl_data->jump.action)
10861                mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10862        if (tbl_data->tbl.obj)
10863                mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10864        if (tbl_data->tunnel_offload && tbl_data->external) {
10865                struct mlx5_list_entry *he;
10866                struct mlx5_hlist *tunnel_grp_hash;
10867                struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10868                union tunnel_tbl_key tunnel_key = {
10869                        .tunnel_id = tbl_data->tunnel ?
10870                                        tbl_data->tunnel->tunnel_id : 0,
10871                        .group = tbl_data->group_id
10872                };
10873                uint32_t table_level = tbl_data->level;
10874                struct mlx5_flow_cb_ctx ctx = {
10875                        .data = (void *)&tunnel_key.val,
10876                };
10877
10878                tunnel_grp_hash = tbl_data->tunnel ?
10879                                        tbl_data->tunnel->groups :
10880                                        thub->groups;
10881                he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10882                if (he)
10883                        mlx5_hlist_unregister(tunnel_grp_hash, he);
10884                DRV_LOG(DEBUG,
10885                        "table_level %u id %u tunnel %u group %u released.",
10886                        table_level,
10887                        tbl_data->id,
10888                        tbl_data->tunnel ?
10889                        tbl_data->tunnel->tunnel_id : 0,
10890                        tbl_data->group_id);
10891        }
10892        if (tbl_data->matchers)
10893                mlx5_list_destroy(tbl_data->matchers);
10894        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10895}
10896
10897/**
10898 * Release a flow table.
10899 *
10900 * @param[in] sh
10901 *   Pointer to device shared structure.
10902 * @param[in] tbl
10903 *   Table resource to be released.
10904 *
10905 * @return
10906 *   0 if the table was released, 1 while a reference on it still exists.
10907 */
10908static int
10909flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10910                             struct mlx5_flow_tbl_resource *tbl)
10911{
10912        struct mlx5_flow_tbl_data_entry *tbl_data;
10913
10914        if (!tbl)
10915                return 0;
10916        tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10917        return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10918}
10919
10920int
10921flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10922                         struct mlx5_list_entry *entry, void *cb_ctx)
10923{
10924        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10925        struct mlx5_flow_dv_matcher *ref = ctx->data;
10926        struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10927                                                        entry);
10928
10929        return cur->crc != ref->crc ||
10930               cur->priority != ref->priority ||
10931               memcmp((const void *)cur->mask.buf,
10932                      (const void *)ref->mask.buf, ref->mask.size);
10933}
10934
10935struct mlx5_list_entry *
10936flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10937{
10938        struct mlx5_dev_ctx_shared *sh = tool_ctx;
10939        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10940        struct mlx5_flow_dv_matcher *ref = ctx->data;
10941        struct mlx5_flow_dv_matcher *resource;
10942        struct mlx5dv_flow_matcher_attr dv_attr = {
10943                .type = IBV_FLOW_ATTR_NORMAL,
10944                .match_mask = (void *)&ref->mask,
10945        };
10946        struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10947                                                            typeof(*tbl), tbl);
10948        int ret;
10949
10950        resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10951                               SOCKET_ID_ANY);
10952        if (!resource) {
10953                rte_flow_error_set(ctx->error, ENOMEM,
10954                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10955                                   "cannot create matcher");
10956                return NULL;
10957        }
10958        *resource = *ref;
10959        dv_attr.match_criteria_enable =
10960                flow_dv_matcher_enable(resource->mask.buf);
10961        __flow_dv_adjust_buf_size(&ref->mask.size,
10962                                  dv_attr.match_criteria_enable);
10963        dv_attr.priority = ref->priority;
10964        if (tbl->is_egress)
10965                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10966        ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
10967                                               tbl->tbl.obj,
10968                                               &resource->matcher_object);
10969        if (ret) {
10970                mlx5_free(resource);
10971                rte_flow_error_set(ctx->error, ENOMEM,
10972                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10973                                   "cannot create matcher");
10974                return NULL;
10975        }
10976        return &resource->entry;
10977}
10978
10979/**
10980 * Register the flow matcher.
10981 *
10982 * @param[in, out] dev
10983 *   Pointer to rte_eth_dev structure.
10984 * @param[in, out] ref
10985 *   Pointer to the flow matcher reference.
10986 * @param[in, out] key
10987 *   Pointer to the flow table key.
10988 * @param[in, out] dev_flow
10989 *   Pointer to the dev_flow.
 * @param[in] tunnel
 *   Tunnel offload context, NULL if not applicable.
 * @param[in] group_id
 *   Group ID the matcher's table belongs to.
10990 * @param[out] error
10991 *   Pointer to error structure.
10992 *
10993 * @return
10994 *   0 on success, a negative errno value otherwise and rte_errno is set.
10995 */
10996static int
10997flow_dv_matcher_register(struct rte_eth_dev *dev,
10998                         struct mlx5_flow_dv_matcher *ref,
10999                         union mlx5_flow_tbl_key *key,
11000                         struct mlx5_flow *dev_flow,
11001                         const struct mlx5_flow_tunnel *tunnel,
11002                         uint32_t group_id,
11003                         struct rte_flow_error *error)
11004{
11005        struct mlx5_list_entry *entry;
11006        struct mlx5_flow_dv_matcher *resource;
11007        struct mlx5_flow_tbl_resource *tbl;
11008        struct mlx5_flow_tbl_data_entry *tbl_data;
11009        struct mlx5_flow_cb_ctx ctx = {
11010                .error = error,
11011                .data = ref,
11012        };
11013        /*
11014         * The tunnel offload API requires this registration for cases
11015         * when a tunnel match rule was inserted before a tunnel set rule.
11016         */
11017        tbl = flow_dv_tbl_resource_get(dev, key->level,
11018                                       key->is_egress, key->is_fdb,
11019                                       dev_flow->external, tunnel,
11020                                       group_id, 0, key->id, error);
11021        if (!tbl)
11022                return -rte_errno;      /* No need to refill the error info */
11023        tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
11024        ref->tbl = tbl;
11025        entry = mlx5_list_register(tbl_data->matchers, &ctx);
11026        if (!entry) {
11027                flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11028                return rte_flow_error_set(error, ENOMEM,
11029                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11030                                          "cannot allocate ref memory");
11031        }
11032        resource = container_of(entry, typeof(*resource), entry);
11033        dev_flow->handle->dvh.matcher = resource;
11034        return 0;
11035}
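
/*
 * Illustrative sketch, not part of the driver: preparing a matcher
 * reference before registration, assuming "attr", "tbl_key",
 * "dev_flow", "tunnel" and "error" come from the translate context.
 * The CRC over the mask buffer feeds flow_dv_matcher_match_cb().
 *
 *    struct mlx5_flow_dv_matcher ref = {
 *            .mask = { .size = sizeof(ref.mask.buf) },
 *            .priority = attr->priority,
 *    };
 *
 *    ref.crc = rte_raw_cksum((const void *)ref.mask.buf, ref.mask.size);
 *    if (flow_dv_matcher_register(dev, &ref, &tbl_key, dev_flow,
 *                                 tunnel, attr->group, error))
 *            return -rte_errno;
 */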
11036
11037struct mlx5_list_entry *
11038flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
11039{
11040        struct mlx5_dev_ctx_shared *sh = tool_ctx;
11041        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11042        struct mlx5_flow_dv_tag_resource *entry;
11043        uint32_t idx = 0;
11044        int ret;
11045
11046        entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
11047        if (!entry) {
11048                rte_flow_error_set(ctx->error, ENOMEM,
11049                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11050                                   "cannot allocate resource memory");
11051                return NULL;
11052        }
11053        entry->idx = idx;
11054        entry->tag_id = *(uint32_t *)(ctx->data);
11055        ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
11056                                                  &entry->action);
11057        if (ret) {
11058                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
11059                rte_flow_error_set(ctx->error, ENOMEM,
11060                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11061                                   NULL, "cannot create action");
11062                return NULL;
11063        }
11064        return &entry->entry;
11065}
11066
11067int
11068flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
11069                     void *cb_ctx)
11070{
11071        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11072        struct mlx5_flow_dv_tag_resource *tag =
11073                   container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
11074
11075        return *(uint32_t *)(ctx->data) != tag->tag_id;
11076}
11077
11078struct mlx5_list_entry *
11079flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
11080                     void *cb_ctx)
11081{
11082        struct mlx5_dev_ctx_shared *sh = tool_ctx;
11083        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11084        struct mlx5_flow_dv_tag_resource *entry;
11085        uint32_t idx = 0;
11086
11087        entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
11088        if (!entry) {
11089                rte_flow_error_set(ctx->error, ENOMEM,
11090                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11091                                   "cannot allocate tag resource memory");
11092                return NULL;
11093        }
11094        memcpy(entry, oentry, sizeof(*entry));
11095        entry->idx = idx;
11096        return &entry->entry;
11097}
11098
11099void
11100flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
11101{
11102        struct mlx5_dev_ctx_shared *sh = tool_ctx;
11103        struct mlx5_flow_dv_tag_resource *tag =
11104                   container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
11105
11106        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
11107}
11108
11109/**
11110 * Find existing tag resource or create and register a new one.
11111 *
11112 * @param[in, out] dev
11113 *   Pointer to rte_eth_dev structure.
11114 * @param[in] tag_be24
11115 *   Tag value in big-endian byte order, right-shifted by 8 bits.
11116 * @param[in, out] dev_flow
11117 *   Pointer to the dev_flow.
11118 * @param[out] error
11119 *   Pointer to error structure.
11120 *
11121 * @return
11122 *   0 on success, a negative errno value otherwise and rte_errno is set.
11123 */
11124static int
11125flow_dv_tag_resource_register
11126                        (struct rte_eth_dev *dev,
11127                         uint32_t tag_be24,
11128                         struct mlx5_flow *dev_flow,
11129                         struct rte_flow_error *error)
11130{
11131        struct mlx5_priv *priv = dev->data->dev_private;
11132        struct mlx5_flow_dv_tag_resource *resource;
11133        struct mlx5_list_entry *entry;
11134        struct mlx5_flow_cb_ctx ctx = {
11135                                        .error = error,
11136                                        .data = &tag_be24,
11137                                        };
11138        struct mlx5_hlist *tag_table;
11139
11140        tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
11141                                      "tags",
11142                                      MLX5_TAGS_HLIST_ARRAY_SIZE,
11143                                      false, false, priv->sh,
11144                                      flow_dv_tag_create_cb,
11145                                      flow_dv_tag_match_cb,
11146                                      flow_dv_tag_remove_cb,
11147                                      flow_dv_tag_clone_cb,
11148                                      flow_dv_tag_clone_free_cb,
11149                                      error);
11150        if (unlikely(!tag_table))
11151                return -rte_errno;
11152        entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
11153        if (entry) {
11154                resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
11155                                        entry);
11156                dev_flow->handle->dvh.rix_tag = resource->idx;
11157                dev_flow->dv.tag_resource = resource;
11158                return 0;
11159        }
11160        return -rte_errno;
11161}
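
/*
 * Illustrative sketch, not part of the driver: registering the tag
 * resource for a MARK action, as done for sample sub-actions further
 * below. "mark_id" is a hypothetical mark value taken from the action
 * configuration.
 *
 *    uint32_t tag_be = mlx5_flow_mark_set(mark_id);
 *
 *    if (flow_dv_tag_resource_register(dev, tag_be, dev_flow, error))
 *            return -rte_errno;
 *    MLX5_ASSERT(dev_flow->dv.tag_resource);
 */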
11162
11163void
11164flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
11165{
11166        struct mlx5_dev_ctx_shared *sh = tool_ctx;
11167        struct mlx5_flow_dv_tag_resource *tag =
11168                   container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
11169
11170        MLX5_ASSERT(tag && sh && tag->action);
11171        claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
11172        DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
11173        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
11174}
11175
11176/**
11177 * Release the tag.
11178 *
11179 * @param dev
11180 *   Pointer to Ethernet device.
11181 * @param tag_idx
11182 *   Tag index.
11183 *
11184 * @return
11185 *   1 while a reference on it exists, 0 when freed.
11186 */
11187static int
11188flow_dv_tag_release(struct rte_eth_dev *dev,
11189                    uint32_t tag_idx)
11190{
11191        struct mlx5_priv *priv = dev->data->dev_private;
11192        struct mlx5_flow_dv_tag_resource *tag;
11193
11194        tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
11195        if (!tag)
11196                return 0;
11197        DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
11198                dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
11199        return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
11200}
11201
11202/**
11203 * Translate action PORT_ID / REPRESENTED_PORT to vport.
11204 *
11205 * @param[in] dev
11206 *   Pointer to rte_eth_dev structure.
11207 * @param[in] action
11208 *   Pointer to action PORT_ID / REPRESENTED_PORT.
11209 * @param[out] dst_port_id
11210 *   The target port ID.
11211 * @param[out] error
11212 *   Pointer to the error structure.
11213 *
11214 * @return
11215 *   0 on success, a negative errno value otherwise and rte_errno is set.
11216 */
11217static int
11218flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
11219                                 const struct rte_flow_action *action,
11220                                 uint32_t *dst_port_id,
11221                                 struct rte_flow_error *error)
11222{
11223        uint32_t port;
11224        struct mlx5_priv *priv;
11225
11226        switch (action->type) {
11227        case RTE_FLOW_ACTION_TYPE_PORT_ID: {
11228                const struct rte_flow_action_port_id *conf;
11229
11230                conf = (const struct rte_flow_action_port_id *)action->conf;
11231                port = conf->original ? dev->data->port_id : conf->id;
11232                break;
11233        }
11234        case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
11235                const struct rte_flow_action_ethdev *ethdev;
11236
11237                ethdev = (const struct rte_flow_action_ethdev *)action->conf;
11238                port = ethdev->port_id;
11239                break;
11240        }
11241        default:
11242                MLX5_ASSERT(false);
11243                return rte_flow_error_set(error, EINVAL,
11244                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
11245                                          "unknown E-Switch action");
11246        }
11247
11248        priv = mlx5_port_to_eswitch_info(port, false);
11249        if (!priv)
11250                return rte_flow_error_set(error, -rte_errno,
11251                                          RTE_FLOW_ERROR_TYPE_ACTION,
11252                                          NULL,
11253                                          "No eswitch info was found for port");
11254#ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
11255        /*
11256         * This parameter is transferred to
11257         * mlx5dv_dr_action_create_dest_ib_port().
11258         */
11259        *dst_port_id = priv->dev_port;
11260#else
11261        /*
11262         * Legacy mode, no LAG configuration is supported.
11263         * This parameter is transferred to
11264         * mlx5dv_dr_action_create_dest_vport().
11265         */
11266        *dst_port_id = priv->vport_id;
11267#endif
11268        return 0;
11269}
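
/*
 * Illustrative sketch, not part of the driver: resolving the
 * destination vport for a REPRESENTED_PORT action, assuming "action"
 * and "error" come from the caller.
 *
 *    uint32_t dst_port;
 *
 *    if (flow_dv_translate_action_port_id(dev, action, &dst_port,
 *                                         error))
 *            return -rte_errno;
 *
 * "dst_port" then feeds the mlx5dv_dr destination IB-port/vport
 * action creation.
 */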
11270
11271/**
11272 * Create a counter with aging configuration.
11273 *
11274 * @param[in] dev
11275 *   Pointer to rte_eth_dev structure.
11276 * @param[in] dev_flow
11277 *   Pointer to the mlx5_flow.
11278 * @param[out] count
11279 *   Pointer to the counter action configuration.
11280 * @param[in] age
11281 *   Pointer to the aging action configuration.
11282 *
11283 * @return
11284 *   Index to flow counter on success, 0 otherwise.
11285 */
11286static uint32_t
11287flow_dv_translate_create_counter(struct rte_eth_dev *dev,
11288                                struct mlx5_flow *dev_flow,
11289                                const struct rte_flow_action_count *count
11290                                        __rte_unused,
11291                                const struct rte_flow_action_age *age)
11292{
11293        uint32_t counter;
11294        struct mlx5_age_param *age_param;
11295
11296        counter = flow_dv_counter_alloc(dev, !!age);
11297        if (!counter || age == NULL)
11298                return counter;
11299        age_param = flow_dv_counter_idx_get_age(dev, counter);
11300        age_param->context = age->context ? age->context :
11301                (void *)(uintptr_t)(dev_flow->flow_idx);
11302        age_param->timeout = age->timeout;
11303        age_param->port_id = dev->data->port_id;
11304        __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
11305        __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
11306        return counter;
11307}
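
/*
 * Illustrative sketch, not part of the driver: allocating a counter
 * bound to an AGE action, assuming "age_conf" is the
 * rte_flow_action_age configuration taken from the action list.
 *
 *    uint32_t cnt_idx;
 *
 *    cnt_idx = flow_dv_translate_create_counter(dev, dev_flow,
 *                                               NULL, age_conf);
 *    if (!cnt_idx)
 *            return -rte_errno;
 */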
11308
11309/**
11310 * Add Tx queue matcher.
11311 *
11312 * @param[in] dev
11313 *   Pointer to the dev struct.
11314 * @param[in, out] matcher
11315 *   Flow matcher.
11316 * @param[in, out] key
11317 *   Flow matcher value.
11318 * @param[in] item
11319 *   Flow pattern to translate.
11322 */
11323static void
11324flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
11325                                void *matcher, void *key,
11326                                const struct rte_flow_item *item)
11327{
11328        const struct mlx5_rte_flow_item_tx_queue *queue_m;
11329        const struct mlx5_rte_flow_item_tx_queue *queue_v;
11330        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
11331        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
11332        struct mlx5_txq_ctrl *txq;
11333        uint32_t queue, mask;
11334
11335        queue_m = (const void *)item->mask;
11336        queue_v = (const void *)item->spec;
11337        if (!queue_v)
11338                return;
11339        txq = mlx5_txq_get(dev, queue_v->queue);
11340        if (!txq)
11341                return;
11342        if (txq->is_hairpin)
11343                queue = txq->obj->sq->id;
11344        else
11345                queue = txq->obj->sq_obj.sq->id;
11346        mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
11347        MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
11348        MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
11349        mlx5_txq_release(dev, queue_v->queue);
11350}
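
/*
 * Illustrative sketch, not part of the driver: the internal item
 * matched here is generated by the PMD itself, e.g.:
 *
 *    struct mlx5_rte_flow_item_tx_queue queue_spec = { .queue = 0 };
 *    struct rte_flow_item item = {
 *            .type = (enum rte_flow_item_type)
 *                    MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
 *            .spec = &queue_spec,
 *    };
 *
 * With a NULL mask, the full SQ number of Tx queue 0 is matched via
 * the source_sqn field.
 */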
11351
11352/**
11353 * Set the hash fields according to the @p flow information.
11354 *
11355 * @param[in] item_flags
11356 *   The match pattern item flags.
11357 * @param[in] rss_desc
11358 *   Pointer to the mlx5_flow_rss_desc.
11359 * @param[out] hash_fields
11360 *   Pointer to the RSS hash fields.
11361 */
11362void
11363flow_dv_hashfields_set(uint64_t item_flags,
11364                       struct mlx5_flow_rss_desc *rss_desc,
11365                       uint64_t *hash_fields)
11366{
11367        uint64_t items = item_flags;
11368        uint64_t fields = 0;
11369        int rss_inner = 0;
11370        uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
11371
11372        *hash_fields = 0;
11373#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
11374        if (rss_desc->level >= 2)
11375                rss_inner = 1;
11376#endif
11377        if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
11378            (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) ||
11379             !items) {
11380                if (rss_types & MLX5_IPV4_LAYER_TYPES) {
11381                        if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
11382                                fields |= IBV_RX_HASH_SRC_IPV4;
11383                        else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
11384                                fields |= IBV_RX_HASH_DST_IPV4;
11385                        else
11386                                fields |= MLX5_IPV4_IBV_RX_HASH;
11387                }
11388        } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
11389                   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) ||
11390                   !items) {
11391                if (rss_types & MLX5_IPV6_LAYER_TYPES) {
11392                        if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
11393                                fields |= IBV_RX_HASH_SRC_IPV6;
11394                        else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
11395                                fields |= IBV_RX_HASH_DST_IPV6;
11396                        else
11397                                fields |= MLX5_IPV6_IBV_RX_HASH;
11398                }
11399        }
11400        if (items & MLX5_FLOW_ITEM_ESP) {
11401                if (rss_types & RTE_ETH_RSS_ESP)
11402                        fields |= IBV_RX_HASH_IPSEC_SPI;
11403        }
11404        if ((fields & ~IBV_RX_HASH_IPSEC_SPI) == 0) {
11405                *hash_fields = fields;
11406                /*
11407                 * There is no match between the RSS types and the
11408                 * L3 protocol (IPv4/IPv6) defined in the flow rule.
11409                 */
11410                return;
11411        }
11412        if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
11413            (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||
11414            !items) {
11415                if (rss_types & RTE_ETH_RSS_UDP) {
11416                        if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11417                                fields |= IBV_RX_HASH_SRC_PORT_UDP;
11418                        else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11419                                fields |= IBV_RX_HASH_DST_PORT_UDP;
11420                        else
11421                                fields |= MLX5_UDP_IBV_RX_HASH;
11422                }
11423        } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
11424                   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP)) ||
11425                   !items) {
11426                if (rss_types & RTE_ETH_RSS_TCP) {
11427                        if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11428                                fields |= IBV_RX_HASH_SRC_PORT_TCP;
11429                        else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11430                                fields |= IBV_RX_HASH_DST_PORT_TCP;
11431                        else
11432                                fields |= MLX5_TCP_IBV_RX_HASH;
11433                }
11434        }
11435        if (rss_inner)
11436                fields |= IBV_RX_HASH_INNER;
11437        *hash_fields = fields;
11438}
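
/*
 * Illustrative sketch, not part of the driver: for an outer IPv4/UDP
 * pattern with RSS types RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP and level 1
 * (outer), the selected hash fields combine the IPv4 addresses with
 * the UDP ports:
 *
 *    uint64_t fields;
 *    struct mlx5_flow_rss_desc rss_desc = {
 *            .types = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP,
 *            .level = 1,
 *    };
 *
 *    flow_dv_hashfields_set(MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 *                           MLX5_FLOW_LAYER_OUTER_L4_UDP,
 *                           &rss_desc, &fields);
 *
 * "fields" then equals MLX5_IPV4_IBV_RX_HASH | MLX5_UDP_IBV_RX_HASH.
 */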
11439
11440/**
11441 * Prepare an Rx Hash queue.
11442 *
11443 * @param dev
11444 *   Pointer to Ethernet device.
11445 * @param[in] dev_flow
11446 *   Pointer to the mlx5_flow.
11447 * @param[in] rss_desc
11448 *   Pointer to the mlx5_flow_rss_desc.
11449 * @param[out] hrxq_idx
11450 *   Hash Rx queue index.
11451 *
11452 * @return
11453 *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
11454 */
11455static struct mlx5_hrxq *
11456flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
11457                     struct mlx5_flow *dev_flow,
11458                     struct mlx5_flow_rss_desc *rss_desc,
11459                     uint32_t *hrxq_idx)
11460{
11461        struct mlx5_flow_handle *dh = dev_flow->handle;
11462        uint32_t shared_rss = rss_desc->shared_rss;
11463        struct mlx5_hrxq *hrxq;
11464
11465        MLX5_ASSERT(rss_desc->queue_num);
11466        rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
11467        rss_desc->hash_fields = dev_flow->hash_fields;
11468        rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
11469        rss_desc->shared_rss = 0;
11470        if (rss_desc->hash_fields == 0)
11471                rss_desc->queue_num = 1;
11472        hrxq = mlx5_hrxq_get(dev, rss_desc);
11473        *hrxq_idx = hrxq ? hrxq->idx : 0;
11474        rss_desc->shared_rss = shared_rss;
11475        return hrxq;
11476}
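
/*
 * Illustrative sketch, not part of the driver: preparing the fate
 * queue for a QUEUE action, as done in the sample translation below;
 * "queue_index" is a hypothetical queue from the action conf.
 *
 *    struct mlx5_hrxq *hrxq;
 *    uint32_t hrxq_idx;
 *
 *    rss_desc->queue_num = 1;
 *    rss_desc->queue[0] = queue_index;
 *    hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
 *    if (!hrxq)
 *            return rte_flow_error_set(error, rte_errno,
 *                                      RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 *                                      "cannot create fate queue");
 */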
11477
11478/**
11479 * Release sample sub action resource.
11480 *
11481 * @param[in, out] dev
11482 *   Pointer to rte_eth_dev structure.
11483 * @param[in] act_res
11484 *   Pointer to sample sub action resource.
11485 */
11486static void
11487flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
11488                                   struct mlx5_flow_sub_actions_idx *act_res)
11489{
11490        if (act_res->rix_hrxq) {
11491                mlx5_hrxq_release(dev, act_res->rix_hrxq);
11492                act_res->rix_hrxq = 0;
11493        }
11494        if (act_res->rix_encap_decap) {
11495                flow_dv_encap_decap_resource_release(dev,
11496                                                     act_res->rix_encap_decap);
11497                act_res->rix_encap_decap = 0;
11498        }
11499        if (act_res->rix_port_id_action) {
11500                flow_dv_port_id_action_resource_release(dev,
11501                                                act_res->rix_port_id_action);
11502                act_res->rix_port_id_action = 0;
11503        }
11504        if (act_res->rix_tag) {
11505                flow_dv_tag_release(dev, act_res->rix_tag);
11506                act_res->rix_tag = 0;
11507        }
11508        if (act_res->rix_jump) {
11509                flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11510                act_res->rix_jump = 0;
11511        }
11512}
11513
11514int
11515flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11516                        struct mlx5_list_entry *entry, void *cb_ctx)
11517{
11518        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11519        struct rte_eth_dev *dev = ctx->dev;
11520        struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11521        struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11522                                                              typeof(*resource),
11523                                                              entry);
11524
11525        if (ctx_resource->ratio == resource->ratio &&
11526            ctx_resource->ft_type == resource->ft_type &&
11527            ctx_resource->ft_id == resource->ft_id &&
11528            ctx_resource->set_action == resource->set_action &&
11529            !memcmp((void *)&ctx_resource->sample_act,
11530                    (void *)&resource->sample_act,
11531                    sizeof(struct mlx5_flow_sub_actions_list))) {
11532                /*
11533                 * Existing sample action should release the prepared
11534                 * sub-actions reference counter.
11535                 */
11536                flow_dv_sample_sub_actions_release(dev,
11537                                                   &ctx_resource->sample_idx);
11538                return 0;
11539        }
11540        return 1;
11541}
11542
11543struct mlx5_list_entry *
11544flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11545{
11546        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11547        struct rte_eth_dev *dev = ctx->dev;
11548        struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11549        void **sample_dv_actions = ctx_resource->sub_actions;
11550        struct mlx5_flow_dv_sample_resource *resource;
11551        struct mlx5dv_dr_flow_sampler_attr sampler_attr;
11552        struct mlx5_priv *priv = dev->data->dev_private;
11553        struct mlx5_dev_ctx_shared *sh = priv->sh;
11554        struct mlx5_flow_tbl_resource *tbl;
11555        uint32_t idx = 0;
11556        const uint32_t next_ft_step = 1;
11557        uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
11558        uint8_t is_egress = 0;
11559        uint8_t is_transfer = 0;
11560        struct rte_flow_error *error = ctx->error;
11561
11562        /* Register new sample resource. */
11563        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11564        if (!resource) {
11565                rte_flow_error_set(error, ENOMEM,
11566                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11567                                          NULL,
11568                                          "cannot allocate resource memory");
11569                return NULL;
11570        }
11571        *resource = *ctx_resource;
11572        /* Create normal path table level */
11573        if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11574                is_transfer = 1;
11575        else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
11576                is_egress = 1;
11577        tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
11578                                        is_egress, is_transfer,
11579                                        true, NULL, 0, 0, 0, error);
11580        if (!tbl) {
11581                rte_flow_error_set(error, ENOMEM,
11582                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11583                                          NULL,
11584                                          "fail to create normal path table "
11585                                          "for sample");
11586                goto error;
11587        }
11588        resource->normal_path_tbl = tbl;
11589        if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11590                if (!sh->default_miss_action) {
11591                        rte_flow_error_set(error, ENOMEM,
11592                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11593                                                NULL,
11594                                                "default miss action was not "
11595                                                "created");
11596                        goto error;
11597                }
11598                sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11599                                                sh->default_miss_action;
11600        }
11601        /* Create a DR sample action */
11602        sampler_attr.sample_ratio = resource->ratio;
11603        sampler_attr.default_next_table = tbl->obj;
11604        sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11605        sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11606                                                        &sample_dv_actions[0];
11607        sampler_attr.action = resource->set_action;
11608        if (mlx5_os_flow_dr_create_flow_action_sampler
11609                        (&sampler_attr, &resource->verbs_action)) {
11610                rte_flow_error_set(error, ENOMEM,
11611                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11612                                        NULL, "cannot create sample action");
11613                goto error;
11614        }
11615        resource->idx = idx;
11616        resource->dev = dev;
11617        return &resource->entry;
11618error:
11619        if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11620                flow_dv_sample_sub_actions_release(dev,
11621                                                   &resource->sample_idx);
11622        if (resource->normal_path_tbl)
11623                flow_dv_tbl_resource_release(MLX5_SH(dev),
11624                                resource->normal_path_tbl);
11625        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11626        return NULL;
11628}
11629
11630struct mlx5_list_entry *
11631flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11632                         struct mlx5_list_entry *entry __rte_unused,
11633                         void *cb_ctx)
11634{
11635        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11636        struct rte_eth_dev *dev = ctx->dev;
11637        struct mlx5_flow_dv_sample_resource *resource;
11638        struct mlx5_priv *priv = dev->data->dev_private;
11639        struct mlx5_dev_ctx_shared *sh = priv->sh;
11640        uint32_t idx = 0;
11641
11642        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11643        if (!resource) {
11644                rte_flow_error_set(ctx->error, ENOMEM,
11645                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11646                                          NULL,
11647                                          "cannot allocate resource memory");
11648                return NULL;
11649        }
11650        memcpy(resource, entry, sizeof(*resource));
11651        resource->idx = idx;
11652        resource->dev = dev;
11653        return &resource->entry;
11654}
11655
11656void
11657flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11658                             struct mlx5_list_entry *entry)
11659{
11660        struct mlx5_flow_dv_sample_resource *resource =
11661                                  container_of(entry, typeof(*resource), entry);
11662        struct rte_eth_dev *dev = resource->dev;
11663        struct mlx5_priv *priv = dev->data->dev_private;
11664
11665        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11666}
11667
11668/**
11669 * Find existing sample resource or create and register a new one.
11670 *
11671 * @param[in, out] dev
11672 *   Pointer to rte_eth_dev structure.
11673 * @param[in] ref
11674 *   Pointer to sample resource reference.
11675 * @param[in, out] dev_flow
11676 *   Pointer to the dev_flow.
11677 * @param[out] error
11678 *   Pointer to error structure.
11679 *
11680 * @return
11681 *   0 on success, a negative errno value otherwise and rte_errno is set.
11682 */
11683static int
11684flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11685                         struct mlx5_flow_dv_sample_resource *ref,
11686                         struct mlx5_flow *dev_flow,
11687                         struct rte_flow_error *error)
11688{
11689        struct mlx5_flow_dv_sample_resource *resource;
11690        struct mlx5_list_entry *entry;
11691        struct mlx5_priv *priv = dev->data->dev_private;
11692        struct mlx5_flow_cb_ctx ctx = {
11693                .dev = dev,
11694                .error = error,
11695                .data = ref,
11696        };
11697
11698        entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11699        if (!entry)
11700                return -rte_errno;
11701        resource = container_of(entry, typeof(*resource), entry);
11702        dev_flow->handle->dvh.rix_sample = resource->idx;
11703        dev_flow->dv.sample_res = resource;
11704        return 0;
11705}
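
/*
 * Illustrative sketch, not part of the driver: once the sample
 * translation below has filled a local resource reference "res", it
 * is registered (or an existing equivalent is reused) through the
 * shared list:
 *
 *    if (flow_dv_sample_resource_register(dev, &res, dev_flow, error))
 *            return -rte_errno;
 *
 * dev_flow->dv.sample_res then points to the shared resource whose
 * verbs_action is appended to the rule's action array.
 */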
11706
11707int
11708flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11709                            struct mlx5_list_entry *entry, void *cb_ctx)
11710{
11711        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11712        struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11713        struct rte_eth_dev *dev = ctx->dev;
11714        struct mlx5_flow_dv_dest_array_resource *resource =
11715                                  container_of(entry, typeof(*resource), entry);
11716        uint32_t idx = 0;
11717
11718        if (ctx_resource->num_of_dest == resource->num_of_dest &&
11719            ctx_resource->ft_type == resource->ft_type &&
11720            !memcmp((void *)resource->sample_act,
11721                    (void *)ctx_resource->sample_act,
11722                   (ctx_resource->num_of_dest *
11723                   sizeof(struct mlx5_flow_sub_actions_list)))) {
11724                /*
11725                 * Existing sample action should release the prepared
11726                 * sub-actions reference counter.
11727                 */
11728                for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11729                        flow_dv_sample_sub_actions_release(dev,
11730                                        &ctx_resource->sample_idx[idx]);
11731                return 0;
11732        }
11733        return 1;
11734}
11735
11736struct mlx5_list_entry *
11737flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11738{
11739        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11740        struct rte_eth_dev *dev = ctx->dev;
11741        struct mlx5_flow_dv_dest_array_resource *resource;
11742        struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11743        struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11744        struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11745        struct mlx5_priv *priv = dev->data->dev_private;
11746        struct mlx5_dev_ctx_shared *sh = priv->sh;
11747        struct mlx5_flow_sub_actions_list *sample_act;
11748        struct mlx5dv_dr_domain *domain;
11749        uint32_t idx = 0, res_idx = 0;
11750        struct rte_flow_error *error = ctx->error;
11751        uint64_t action_flags;
11752        int ret;
11753
11754        /* Register new destination array resource. */
11755        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11756                                            &res_idx);
11757        if (!resource) {
11758                rte_flow_error_set(error, ENOMEM,
11759                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11760                                          NULL,
11761                                          "cannot allocate resource memory");
11762                return NULL;
11763        }
11764        *resource = *ctx_resource;
11765        if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11766                domain = sh->fdb_domain;
11767        else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11768                domain = sh->rx_domain;
11769        else
11770                domain = sh->tx_domain;
11771        for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11772                dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11773                                 mlx5_malloc(MLX5_MEM_ZERO,
11774                                 sizeof(struct mlx5dv_dr_action_dest_attr),
11775                                 0, SOCKET_ID_ANY);
11776                if (!dest_attr[idx]) {
11777                        rte_flow_error_set(error, ENOMEM,
11778                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11779                                           NULL,
11780                                           "cannot allocate resource memory");
11781                        goto error;
11782                }
11783                dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11784                sample_act = &ctx_resource->sample_act[idx];
11785                action_flags = sample_act->action_flags;
11786                switch (action_flags) {
11787                case MLX5_FLOW_ACTION_QUEUE:
11788                        dest_attr[idx]->dest = sample_act->dr_queue_action;
11789                        break;
11790                case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11791                        dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11792                        dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11793                        dest_attr[idx]->dest_reformat->reformat =
11794                                        sample_act->dr_encap_action;
11795                        dest_attr[idx]->dest_reformat->dest =
11796                                        sample_act->dr_port_id_action;
11797                        break;
11798                case MLX5_FLOW_ACTION_PORT_ID:
11799                        dest_attr[idx]->dest = sample_act->dr_port_id_action;
11800                        break;
11801                case MLX5_FLOW_ACTION_JUMP:
11802                        dest_attr[idx]->dest = sample_act->dr_jump_action;
11803                        break;
11804                default:
11805                        rte_flow_error_set(error, EINVAL,
11806                                           RTE_FLOW_ERROR_TYPE_ACTION,
11807                                           NULL,
11808                                           "unsupported actions type");
11809                        goto error;
11810                }
11811        }
11812        /* create a dest array action */
11813        ret = mlx5_os_flow_dr_create_flow_action_dest_array
11814                                                (domain,
11815                                                 resource->num_of_dest,
11816                                                 dest_attr,
11817                                                 &resource->action);
11818        if (ret) {
11819                rte_flow_error_set(error, ENOMEM,
11820                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11821                                   NULL,
11822                                   "cannot create destination array action");
11823                goto error;
11824        }
11825        resource->idx = res_idx;
11826        resource->dev = dev;
11827        for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11828                mlx5_free(dest_attr[idx]);
11829        return &resource->entry;
11830error:
11831        for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11832                flow_dv_sample_sub_actions_release(dev,
11833                                                   &resource->sample_idx[idx]);
11834                if (dest_attr[idx])
11835                        mlx5_free(dest_attr[idx]);
11836        }
11837        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11838        return NULL;
11839}
11840
11841struct mlx5_list_entry *
11842flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11843                            struct mlx5_list_entry *entry __rte_unused,
11844                            void *cb_ctx)
11845{
11846        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11847        struct rte_eth_dev *dev = ctx->dev;
11848        struct mlx5_flow_dv_dest_array_resource *resource;
11849        struct mlx5_priv *priv = dev->data->dev_private;
11850        struct mlx5_dev_ctx_shared *sh = priv->sh;
11851        uint32_t res_idx = 0;
11852        struct rte_flow_error *error = ctx->error;
11853
11854        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11855                                      &res_idx);
11856        if (!resource) {
11857                rte_flow_error_set(error, ENOMEM,
11858                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11859                                          NULL,
11860                                          "cannot allocate dest-array memory");
11861                return NULL;
11862        }
11863        memcpy(resource, entry, sizeof(*resource));
11864        resource->idx = res_idx;
11865        resource->dev = dev;
11866        return &resource->entry;
11867}
11868
11869void
11870flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11871                                 struct mlx5_list_entry *entry)
11872{
11873        struct mlx5_flow_dv_dest_array_resource *resource =
11874                        container_of(entry, typeof(*resource), entry);
11875        struct rte_eth_dev *dev = resource->dev;
11876        struct mlx5_priv *priv = dev->data->dev_private;
11877
11878        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11879}
11880
11881/**
11882 * Find existing destination array resource or create and register a new one.
11883 *
11884 * @param[in, out] dev
11885 *   Pointer to rte_eth_dev structure.
11886 * @param[in] ref
11887 *   Pointer to destination array resource reference.
11888 * @param[in, out] dev_flow
11889 *   Pointer to the dev_flow.
11890 * @param[out] error
11891 *   Pointer to error structure.
11892 *
11893 * @return
11894 *   0 on success, a negative errno value otherwise and rte_errno is set.
11895 */
11896static int
11897flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11898                         struct mlx5_flow_dv_dest_array_resource *ref,
11899                         struct mlx5_flow *dev_flow,
11900                         struct rte_flow_error *error)
11901{
11902        struct mlx5_flow_dv_dest_array_resource *resource;
11903        struct mlx5_priv *priv = dev->data->dev_private;
11904        struct mlx5_list_entry *entry;
11905        struct mlx5_flow_cb_ctx ctx = {
11906                .dev = dev,
11907                .error = error,
11908                .data = ref,
11909        };
11910
11911        entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11912        if (!entry)
11913                return -rte_errno;
11914        resource = container_of(entry, typeof(*resource), entry);
11915        dev_flow->handle->dvh.rix_dest_array = resource->idx;
11916        dev_flow->dv.dest_array_res = resource;
11917        return 0;
11918}
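
/*
 * Illustrative sketch, not part of the driver: mirroring to more than
 * one destination goes through a single destination array resource,
 * assuming "ref" was filled with num_of_dest sub-action entries:
 *
 *    if (flow_dv_dest_array_resource_register(dev, &ref, dev_flow,
 *                                             error))
 *            return -rte_errno;
 *
 * dev_flow->dv.dest_array_res->action is then used as the rule's
 * fate action.
 */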
11919
11920/**
11921 * Convert Sample action to DV specification.
11922 *
11923 * @param[in] dev
11924 *   Pointer to rte_eth_dev structure.
11925 * @param[in] action
11926 *   Pointer to sample action structure.
11927 * @param[in, out] dev_flow
11928 *   Pointer to the mlx5_flow.
11929 * @param[in] attr
11930 *   Pointer to the flow attributes.
11931 * @param[in, out] num_of_dest
11932 *   Pointer to the num of destination.
11933 * @param[in, out] sample_actions
11934 *   Pointer to sample actions list.
11935 * @param[in, out] res
11936 *   Pointer to sample resource.
11937 * @param[out] error
11938 *   Pointer to the error structure.
11939 *
11940 * @return
11941 *   0 on success, a negative errno value otherwise and rte_errno is set.
11942 */
11943static int
11944flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11945                                const struct rte_flow_action_sample *action,
11946                                struct mlx5_flow *dev_flow,
11947                                const struct rte_flow_attr *attr,
11948                                uint32_t *num_of_dest,
11949                                void **sample_actions,
11950                                struct mlx5_flow_dv_sample_resource *res,
11951                                struct rte_flow_error *error)
11952{
11953        struct mlx5_priv *priv = dev->data->dev_private;
11954        const struct rte_flow_action *sub_actions;
11955        struct mlx5_flow_sub_actions_list *sample_act;
11956        struct mlx5_flow_sub_actions_idx *sample_idx;
11957        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11958        struct rte_flow *flow = dev_flow->flow;
11959        struct mlx5_flow_rss_desc *rss_desc;
11960        uint64_t action_flags = 0;
11961
11962        MLX5_ASSERT(wks);
11963        rss_desc = &wks->rss_desc;
11964        sample_act = &res->sample_act;
11965        sample_idx = &res->sample_idx;
11966        res->ratio = action->ratio;
11967        sub_actions = action->actions;
11968        for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11969                int type = sub_actions->type;
11970                uint32_t pre_rix = 0;
11971                void *pre_r;
11972                switch (type) {
11973                case RTE_FLOW_ACTION_TYPE_QUEUE:
11974                {
11975                        const struct rte_flow_action_queue *queue;
11976                        struct mlx5_hrxq *hrxq;
11977                        uint32_t hrxq_idx;
11978
11979                        queue = sub_actions->conf;
11980                        rss_desc->queue_num = 1;
11981                        rss_desc->queue[0] = queue->index;
11982                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11983                                                    rss_desc, &hrxq_idx);
11984                        if (!hrxq)
11985                                return rte_flow_error_set
11986                                        (error, rte_errno,
11987                                         RTE_FLOW_ERROR_TYPE_ACTION,
11988                                         NULL,
11989                                         "cannot create fate queue");
11990                        sample_act->dr_queue_action = hrxq->action;
11991                        sample_idx->rix_hrxq = hrxq_idx;
11992                        sample_actions[sample_act->actions_num++] =
11993                                                hrxq->action;
11994                        (*num_of_dest)++;
11995                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
11996                        if (action_flags & MLX5_FLOW_ACTION_MARK)
11997                                dev_flow->handle->rix_hrxq = hrxq_idx;
11998                        dev_flow->handle->fate_action =
11999                                        MLX5_FLOW_FATE_QUEUE;
12000                        break;
12001                }
12002                case RTE_FLOW_ACTION_TYPE_RSS:
12003                {
12004                        struct mlx5_hrxq *hrxq;
12005                        uint32_t hrxq_idx;
12006                        const struct rte_flow_action_rss *rss;
12007                        const uint8_t *rss_key;
12008
12009                        rss = sub_actions->conf;
12010                        memcpy(rss_desc->queue, rss->queue,
12011                               rss->queue_num * sizeof(uint16_t));
12012                        rss_desc->queue_num = rss->queue_num;
12013                        /* NULL RSS key indicates default RSS key. */
12014                        rss_key = !rss->key ? rss_hash_default_key : rss->key;
12015                        memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12016                        /*
12017                         * rss->level and rss->types should be set in advance
12018                         * when expanding items for RSS.
12019                         */
12020                        flow_dv_hashfields_set(dev_flow->handle->layers,
12021                                               rss_desc,
12022                                               &dev_flow->hash_fields);
12023                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
12024                                                    rss_desc, &hrxq_idx);
12025                        if (!hrxq)
12026                                return rte_flow_error_set
12027                                        (error, rte_errno,
12028                                         RTE_FLOW_ERROR_TYPE_ACTION,
12029                                         NULL,
12030                                         "cannot create fate queue");
12031                        sample_act->dr_queue_action = hrxq->action;
12032                        sample_idx->rix_hrxq = hrxq_idx;
12033                        sample_actions[sample_act->actions_num++] =
12034                                                hrxq->action;
12035                        (*num_of_dest)++;
12036                        action_flags |= MLX5_FLOW_ACTION_RSS;
12037                        if (action_flags & MLX5_FLOW_ACTION_MARK)
12038                                dev_flow->handle->rix_hrxq = hrxq_idx;
12039                        dev_flow->handle->fate_action =
12040                                        MLX5_FLOW_FATE_QUEUE;
12041                        break;
12042                }
12043                case RTE_FLOW_ACTION_TYPE_MARK:
12044                {
12045                        uint32_t tag_be = mlx5_flow_mark_set
12046                                (((const struct rte_flow_action_mark *)
12047                                (sub_actions->conf))->id);
12048
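                        /*
                         * Flag MARK usage in the per-thread workspace so
                         * that mark handling can be enabled on the Rx side
                         * for this flow.
                         */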
12049                        wks->mark = 1;
12050                        pre_rix = dev_flow->handle->dvh.rix_tag;
12051                        /* Save the mark resource before sample */
12052                        pre_r = dev_flow->dv.tag_resource;
12053                        if (flow_dv_tag_resource_register(dev, tag_be,
12054                                                  dev_flow, error))
12055                                return -rte_errno;
12056                        MLX5_ASSERT(dev_flow->dv.tag_resource);
12057                        sample_act->dr_tag_action =
12058                                dev_flow->dv.tag_resource->action;
12059                        sample_idx->rix_tag =
12060                                dev_flow->handle->dvh.rix_tag;
12061                        sample_actions[sample_act->actions_num++] =
12062                                                sample_act->dr_tag_action;
12063                        /* Recover the mark resource after sample */
12064                        dev_flow->dv.tag_resource = pre_r;
12065                        dev_flow->handle->dvh.rix_tag = pre_rix;
12066                        action_flags |= MLX5_FLOW_ACTION_MARK;
12067                        break;
12068                }
12069                case RTE_FLOW_ACTION_TYPE_COUNT:
12070                {
12071                        if (!flow->counter) {
12072                                flow->counter =
12073                                        flow_dv_translate_create_counter(dev,
12074                                                dev_flow, sub_actions->conf,
12075                                                0);
12076                                if (!flow->counter)
12077                                        return rte_flow_error_set
12078                                                (error, rte_errno,
12079                                                RTE_FLOW_ERROR_TYPE_ACTION,
12080                                                NULL,
12081                                                "cannot create counter"
12082                                                " object.");
12083                        }
12084                        sample_act->dr_cnt_action =
12085                                  (flow_dv_counter_get_by_idx(dev,
12086                                  flow->counter, NULL))->action;
12087                        sample_actions[sample_act->actions_num++] =
12088                                                sample_act->dr_cnt_action;
12089                        action_flags |= MLX5_FLOW_ACTION_COUNT;
12090                        break;
12091                }
12092                case RTE_FLOW_ACTION_TYPE_PORT_ID:
12093                case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12094                {
12095                        struct mlx5_flow_dv_port_id_action_resource
12096                                        port_id_resource;
12097                        uint32_t port_id = 0;
12098
12099                        memset(&port_id_resource, 0, sizeof(port_id_resource));
12100                        /* Save the port id resource before sample */
12101                        pre_rix = dev_flow->handle->rix_port_id_action;
12102                        pre_r = dev_flow->dv.port_id_action;
12103                        if (flow_dv_translate_action_port_id(dev, sub_actions,
12104                                                             &port_id, error))
12105                                return -rte_errno;
12106                        port_id_resource.port_id = port_id;
12107                        if (flow_dv_port_id_action_resource_register
12108                            (dev, &port_id_resource, dev_flow, error))
12109                                return -rte_errno;
12110                        sample_act->dr_port_id_action =
12111                                dev_flow->dv.port_id_action->action;
12112                        sample_idx->rix_port_id_action =
12113                                dev_flow->handle->rix_port_id_action;
12114                        sample_actions[sample_act->actions_num++] =
12115                                                sample_act->dr_port_id_action;
12116                        /* Recover the port id resource after sample */
12117                        dev_flow->dv.port_id_action = pre_r;
12118                        dev_flow->handle->rix_port_id_action = pre_rix;
12119                        (*num_of_dest)++;
12120                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12121                        break;
12122                }
12123                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12124                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12125                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12126                        /* Save the encap resource before sample */
12127                        pre_rix = dev_flow->handle->dvh.rix_encap_decap;
12128                        pre_r = dev_flow->dv.encap_decap;
12129                        if (flow_dv_create_action_l2_encap(dev, sub_actions,
12130                                                           dev_flow,
12131                                                           attr->transfer,
12132                                                           error))
12133                                return -rte_errno;
12134                        sample_act->dr_encap_action =
12135                                dev_flow->dv.encap_decap->action;
12136                        sample_idx->rix_encap_decap =
12137                                dev_flow->handle->dvh.rix_encap_decap;
12138                        sample_actions[sample_act->actions_num++] =
12139                                                sample_act->dr_encap_action;
12140                        /* Recover the encap resource after sample */
12141                        dev_flow->dv.encap_decap = pre_r;
12142                        dev_flow->handle->dvh.rix_encap_decap = pre_rix;
12143                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
12144                        break;
12145                default:
12146                        return rte_flow_error_set(error, EINVAL,
12147                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12148                                NULL,
12149                                "unsupported action for sampler");
12150                }
12151        }
12152        sample_act->action_flags = action_flags;
12153        res->ft_id = dev_flow->dv.group;
12154        if (attr->transfer) {
12155                union {
12156                        uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
12157                        uint64_t set_action;
12158                } action_ctx = { .set_action = 0 };
12159
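                /*
                 * Build a register SET action writing the vport metadata
                 * tag into REG_C_0, presumably so the source vport
                 * information is preserved for packets on the FDB sample
                 * path.
                 */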
12160                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12161                MLX5_SET(set_action_in, action_ctx.action_in, action_type,
12162                         MLX5_MODIFICATION_TYPE_SET);
12163                MLX5_SET(set_action_in, action_ctx.action_in, field,
12164                         MLX5_MODI_META_REG_C_0);
12165                MLX5_SET(set_action_in, action_ctx.action_in, data,
12166                         priv->vport_meta_tag);
12167                res->set_action = action_ctx.set_action;
12168        } else if (attr->ingress) {
12169                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12170        } else {
12171                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
12172        }
12173        return 0;
12174}
12175
12176/**
12177 * Convert Sample action to DV specification.
12178 *
12179 * @param[in] dev
12180 *   Pointer to rte_eth_dev structure.
12181 * @param[in, out] dev_flow
12182 *   Pointer to the mlx5_flow.
12183 * @param[in] num_of_dest
12184 *   The number of destinations.
12185 * @param[in, out] res
12186 *   Pointer to sample resource.
12187 * @param[in, out] mdest_res
12188 *   Pointer to destination array resource.
12189 * @param[in] sample_actions
12190 *   Pointer to sample path actions list.
12191 * @param[in] action_flags
12192 *   Holds the actions detected until now.
12193 * @param[out] error
12194 *   Pointer to the error structure.
12195 *
12196 * @return
12197 *   0 on success, a negative errno value otherwise and rte_errno is set.
12198 */
12199static int
12200flow_dv_create_action_sample(struct rte_eth_dev *dev,
12201                             struct mlx5_flow *dev_flow,
12202                             uint32_t num_of_dest,
12203                             struct mlx5_flow_dv_sample_resource *res,
12204                             struct mlx5_flow_dv_dest_array_resource *mdest_res,
12205                             void **sample_actions,
12206                             uint64_t action_flags,
12207                             struct rte_flow_error *error)
12208{
12209        /* Update normal path action resources in the last index of the array. */
12210        uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
12211        struct mlx5_flow_sub_actions_list *sample_act =
12212                                        &mdest_res->sample_act[dest_index];
12213        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12214        struct mlx5_flow_rss_desc *rss_desc;
12215        uint32_t normal_idx = 0;
12216        struct mlx5_hrxq *hrxq;
12217        uint32_t hrxq_idx;
12218
12219        MLX5_ASSERT(wks);
12220        rss_desc = &wks->rss_desc;
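        /*
         * More than one destination means mirroring: the sample path
         * actions are placed at index 0 of the destination array and the
         * normal path actions at the last index.
         */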
12221        if (num_of_dest > 1) {
12222                if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
12223                        /* Handle QP action for mirroring */
12224                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
12225                                                    rss_desc, &hrxq_idx);
12226                        if (!hrxq)
12227                                return rte_flow_error_set
12228                                     (error, rte_errno,
12229                                      RTE_FLOW_ERROR_TYPE_ACTION,
12230                                      NULL,
12231                                      "cannot create rx queue");
12232                        normal_idx++;
12233                        mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
12234                        sample_act->dr_queue_action = hrxq->action;
12235                        if (action_flags & MLX5_FLOW_ACTION_MARK)
12236                                dev_flow->handle->rix_hrxq = hrxq_idx;
12237                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12238                }
12239                if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
12240                        normal_idx++;
12241                        mdest_res->sample_idx[dest_index].rix_encap_decap =
12242                                dev_flow->handle->dvh.rix_encap_decap;
12243                        sample_act->dr_encap_action =
12244                                dev_flow->dv.encap_decap->action;
12245                        dev_flow->handle->dvh.rix_encap_decap = 0;
12246                }
12247                if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
12248                        normal_idx++;
12249                        mdest_res->sample_idx[dest_index].rix_port_id_action =
12250                                dev_flow->handle->rix_port_id_action;
12251                        sample_act->dr_port_id_action =
12252                                dev_flow->dv.port_id_action->action;
12253                        dev_flow->handle->rix_port_id_action = 0;
12254                }
12255                if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
12256                        normal_idx++;
12257                        mdest_res->sample_idx[dest_index].rix_jump =
12258                                dev_flow->handle->rix_jump;
12259                        sample_act->dr_jump_action =
12260                                dev_flow->dv.jump->action;
12261                        dev_flow->handle->rix_jump = 0;
12262                }
12263                sample_act->actions_num = normal_idx;
12264                /* Update sample action resources in the first index of the array. */
12265                mdest_res->ft_type = res->ft_type;
12266                memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
12267                                sizeof(struct mlx5_flow_sub_actions_idx));
12268                memcpy(&mdest_res->sample_act[0], &res->sample_act,
12269                                sizeof(struct mlx5_flow_sub_actions_list));
12270                mdest_res->num_of_dest = num_of_dest;
12271                if (flow_dv_dest_array_resource_register(dev, mdest_res,
12272                                                         dev_flow, error))
12273                        return rte_flow_error_set(error, EINVAL,
12274                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12275                                                  NULL, "can't create sample "
12276                                                  "action");
12277        } else {
12278                res->sub_actions = sample_actions;
12279                if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
12280                        return rte_flow_error_set(error, EINVAL,
12281                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12282                                                  NULL,
12283                                                  "can't create sample action");
12284        }
12285        return 0;
12286}
12287
12288/**
12289 * Remove an ASO age action from age actions list.
12290 *
12291 * @param[in] dev
12292 *   Pointer to the Ethernet device structure.
12293 * @param[in] age
12294 *   Pointer to the aso age action handler.
12295 */
12296static void
12297flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
12298                                struct mlx5_aso_age_action *age)
12299{
12300        struct mlx5_age_info *age_info;
12301        struct mlx5_age_param *age_param = &age->age_params;
12302        struct mlx5_priv *priv = dev->data->dev_private;
12303        uint16_t expected = AGE_CANDIDATE;
12304
12305        age_info = GET_PORT_AGE_INFO(priv);
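        /*
         * Try to move the action straight from AGE_CANDIDATE to AGE_FREE.
         * If the state has already advanced (e.g. the action aged out),
         * the action may be linked on the aged-out list and must be
         * removed from it under the lock.
         */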
12306        if (!__atomic_compare_exchange_n(&age_param->state, &expected,
12307                                         AGE_FREE, false, __ATOMIC_RELAXED,
12308                                         __ATOMIC_RELAXED)) {
12309                /*
12310                 * The lock is needed even on age timeout,
12311                 * since the age action may still be in process.
12312                 */
12313                rte_spinlock_lock(&age_info->aged_sl);
12314                LIST_REMOVE(age, next);
12315                rte_spinlock_unlock(&age_info->aged_sl);
12316                __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
12317        }
12318}
12319
12320/**
12321 * Release an ASO age action.
12322 *
12323 * @param[in] dev
12324 *   Pointer to the Ethernet device structure.
12325 * @param[in] age_idx
12326 *   Index of ASO age action to release.
12330 *
12331 * @return
12332 *   0 when age action was removed, otherwise the number of references.
12333 */
12334static int
12335flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
12336{
12337        struct mlx5_priv *priv = dev->data->dev_private;
12338        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12339        struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
12340        uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
12341
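        /*
         * A reference count of zero means neither a flow nor an indirect
         * action handle uses this age action any more: unlink it from the
         * aging list and return it to the free list.
         */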
12342        if (!ret) {
12343                flow_dv_aso_age_remove_from_age(dev, age);
12344                rte_spinlock_lock(&mng->free_sl);
12345                LIST_INSERT_HEAD(&mng->free, age, next);
12346                rte_spinlock_unlock(&mng->free_sl);
12347        }
12348        return ret;
12349}
12350
12351/**
12352 * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
12353 *
12354 * @param[in] dev
12355 *   Pointer to the Ethernet device structure.
12356 *
12357 * @return
12358 *   0 on success, otherwise negative errno value and rte_errno is set.
12359 */
12360static int
12361flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
12362{
12363        struct mlx5_priv *priv = dev->data->dev_private;
12364        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12365        void *old_pools = mng->pools;
12366        uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
12367        uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
12368        void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12369
12370        if (!pools) {
12371                rte_errno = ENOMEM;
12372                return -ENOMEM;
12373        }
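        /*
         * Only the array of pool pointers is resized; the caller
         * (flow_dv_age_pool_create()) holds mng->resize_rwl for writing,
         * so readers cannot observe a partial update.
         */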
12374        if (old_pools) {
12375                memcpy(pools, old_pools,
12376                       mng->n * sizeof(struct mlx5_aso_age_pool *));
12377                mlx5_free(old_pools);
12378        } else {
12379                /* First ASO flow hit allocation - starting ASO data-path. */
12380                int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
12381
12382                if (ret) {
12383                        mlx5_free(pools);
12384                        return ret;
12385                }
12386        }
12387        mng->n = resize;
12388        mng->pools = pools;
12389        return 0;
12390}
12391
12392/**
12393 * Create and initialize a new ASO aging pool.
12394 *
12395 * @param[in] dev
12396 *   Pointer to the Ethernet device structure.
12397 * @param[out] age_free
12398 *   Where to put the pointer of a new age action.
12399 *
12400 * @return
12401 *   The age actions pool pointer and @p age_free is set on success,
12402 *   NULL otherwise and rte_errno is set.
12403 */
12404static struct mlx5_aso_age_pool *
12405flow_dv_age_pool_create(struct rte_eth_dev *dev,
12406                        struct mlx5_aso_age_action **age_free)
12407{
12408        struct mlx5_priv *priv = dev->data->dev_private;
12409        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12410        struct mlx5_aso_age_pool *pool = NULL;
12411        struct mlx5_devx_obj *obj = NULL;
12412        uint32_t i;
12413
12414        obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
12415                                                    priv->sh->cdev->pdn);
12416        if (!obj) {
12417                rte_errno = ENODATA;
12418                DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
12419                return NULL;
12420        }
12421        pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12422        if (!pool) {
12423                claim_zero(mlx5_devx_cmd_destroy(obj));
12424                rte_errno = ENOMEM;
12425                return NULL;
12426        }
12427        pool->flow_hit_aso_obj = obj;
12428        pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
12429        rte_rwlock_write_lock(&mng->resize_rwl);
12430        pool->index = mng->next;
12431        /* Resize pools array if there is no room for the new pool in it. */
12432        if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
12433                claim_zero(mlx5_devx_cmd_destroy(obj));
12434                mlx5_free(pool);
12435                rte_rwlock_write_unlock(&mng->resize_rwl);
12436                return NULL;
12437        }
12438        mng->pools[pool->index] = pool;
12439        mng->next++;
12440        rte_rwlock_write_unlock(&mng->resize_rwl);
12441        /* Assign the first action in the new pool, the rest go to free list. */
12442        *age_free = &pool->actions[0];
12443        for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
12444                pool->actions[i].offset = i;
12445                LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
12446        }
12447        return pool;
12448}
12449
12450/**
12451 * Allocate an ASO aging bit.
12452 *
12453 * @param[in] dev
12454 *   Pointer to the Ethernet device structure.
12455 * @param[out] error
12456 *   Pointer to the error structure.
12457 *
12458 * @return
12459 *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
12460 */
12461static uint32_t
12462flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12463{
12464        struct mlx5_priv *priv = dev->data->dev_private;
12465        const struct mlx5_aso_age_pool *pool;
12466        struct mlx5_aso_age_action *age_free = NULL;
12467        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12468
12469        MLX5_ASSERT(mng);
12470        /* Try to get the next free age action bit. */
12471        rte_spinlock_lock(&mng->free_sl);
12472        age_free = LIST_FIRST(&mng->free);
12473        if (age_free) {
12474                LIST_REMOVE(age_free, next);
12475        } else if (!flow_dv_age_pool_create(dev, &age_free)) {
12476                rte_spinlock_unlock(&mng->free_sl);
12477                rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12478                                   NULL, "failed to create ASO age pool");
12479                return 0; /* 0 is an error. */
12480        }
12481        rte_spinlock_unlock(&mng->free_sl);
12482        pool = container_of
12483          ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
12484                  (age_free - age_free->offset), const struct mlx5_aso_age_pool,
12485                                                                       actions);
12486        if (!age_free->dr_action) {
12487                int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
12488                                                 error);
12489
12490                if (reg_c < 0) {
12491                        rte_flow_error_set(error, rte_errno,
12492                                           RTE_FLOW_ERROR_TYPE_ACTION,
12493                                           NULL, "failed to get reg_c "
12494                                           "for ASO flow hit");
12495                        return 0; /* 0 is an error. */
12496                }
12497#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12498                age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12499                                (priv->sh->rx_domain,
12500                                 pool->flow_hit_aso_obj->obj, age_free->offset,
12501                                 MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12502                                 (reg_c - REG_C_0));
12503#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12504                if (!age_free->dr_action) {
12505                        rte_errno = errno;
12506                        rte_spinlock_lock(&mng->free_sl);
12507                        LIST_INSERT_HEAD(&mng->free, age_free, next);
12508                        rte_spinlock_unlock(&mng->free_sl);
12509                        rte_flow_error_set(error, rte_errno,
12510                                           RTE_FLOW_ERROR_TYPE_ACTION,
12511                                           NULL, "failed to create ASO "
12512                                           "flow hit action");
12513                        return 0; /* 0 is an error. */
12514                }
12515        }
12516        __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
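        /*
         * Encode the action index: the pool index is kept in the lower
         * 16 bits and (offset + 1) in the upper ones. The offset is
         * biased by one so that 0 stays reserved as the error value.
         */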
12517        return pool->index | ((age_free->offset + 1) << 16);
12518}
12519
12520/**
12521 * Initialize flow ASO age parameters.
12522 *
12523 * @param[in] dev
12524 *   Pointer to rte_eth_dev structure.
12525 * @param[in] age_idx
12526 *   Index of ASO age action.
12527 * @param[in] context
12528 *   Pointer to flow counter age context.
12529 * @param[in] timeout
12530 *   Aging timeout in seconds.
12531 *
12532 */
12533static void
12534flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12535                            uint32_t age_idx,
12536                            void *context,
12537                            uint32_t timeout)
12538{
12539        struct mlx5_aso_age_action *aso_age;
12540
12541        aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12542        MLX5_ASSERT(aso_age);
12543        aso_age->age_params.context = context;
12544        aso_age->age_params.timeout = timeout;
12545        aso_age->age_params.port_id = dev->data->port_id;
12546        __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12547                         __ATOMIC_RELAXED);
12548        __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12549                         __ATOMIC_RELAXED);
12550}
12551
12552static void
12553flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12554                               const struct rte_flow_item_integrity *value,
12555                               void *headers_m, void *headers_v)
12556{
12557        if (mask->l4_ok) {
12558                /* The RTE l4_ok filter aggregates the hardware l4_ok and
12559                 * l4_checksum_ok filters.
12560                 * A positive RTE l4_ok match requires a hardware match on
12561                 * both L4 hardware integrity bits.
12562                 * For a negative match, check the hardware l4_checksum_ok
12563                 * bit only, because hardware sets that bit to 0 for all
12564                 * packets with bad L4.
12565                 */
12566                if (value->l4_ok) {
12567                        MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
12568                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
12569                }
12570                MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12571                MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12572                         !!value->l4_ok);
12573        }
12574        if (mask->l4_csum_ok) {
12575                MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12576                MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12577                         value->l4_csum_ok);
12578        }
12579}
12580
12581static void
12582flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12583                               const struct rte_flow_item_integrity *value,
12584                               void *headers_m, void *headers_v, bool is_ipv4)
12585{
12586        if (mask->l3_ok) {
12587                /* For IPv4, the RTE l3_ok filter aggregates the hardware
12588                 * l3_ok and ipv4_checksum_ok filters.
12589                 * A positive RTE l3_ok match requires a hardware match on
12590                 * both L3 hardware integrity bits.
12591                 * For a negative match, check the hardware ipv4_checksum_ok
12592                 * bit only, because hardware sets that bit to 0 for all
12593                 * packets with bad L3.
12594                 */
12595                if (is_ipv4) {
12596                        if (value->l3_ok) {
12597                                MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12598                                         l3_ok, 1);
12599                                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12600                                         l3_ok, 1);
12601                        }
12602                        MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12603                                 ipv4_checksum_ok, 1);
12604                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12605                                 ipv4_checksum_ok, !!value->l3_ok);
12606                } else {
12607                        MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
12608                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12609                                 value->l3_ok);
12610                }
12611        }
12612        if (mask->ipv4_csum_ok) {
12613                MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
12614                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12615                         value->ipv4_csum_ok);
12616        }
12617}
12618
12619static void
12620set_integrity_bits(void *headers_m, void *headers_v,
12621                   const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12622{
12623        const struct rte_flow_item_integrity *spec = integrity_item->spec;
12624        const struct rte_flow_item_integrity *mask = integrity_item->mask;
12625
12626        /* Validation guarantees the integrity item spec pointer is not NULL. */
12627        MLX5_ASSERT(spec != NULL);
12628        if (!mask)
12629                mask = &rte_flow_item_integrity_mask;
12630        flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12631                                       is_l3_ip4);
12632        flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12633}
12634
12635static void
12636flow_dv_translate_item_integrity_post(void *matcher, void *key,
12637                                      const
12638                                      struct rte_flow_item *integrity_items[2],
12639                                      uint64_t pattern_flags)
12640{
12641        void *headers_m, *headers_v;
12642        bool is_l3_ip4;
12643
12644        if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12645                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12646                                         inner_headers);
12647                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12648                is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12649                            0;
12650                set_integrity_bits(headers_m, headers_v,
12651                                   integrity_items[1], is_l3_ip4);
12652        }
12653        if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12654                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12655                                         outer_headers);
12656                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12657                is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12658                            0;
12659                set_integrity_bits(headers_m, headers_v,
12660                                   integrity_items[0], is_l3_ip4);
12661        }
12662}
12663
12664static void
12665flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12666                                 const struct rte_flow_item *integrity_items[2],
12667                                 uint64_t *last_item)
12668{
12669        const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12670
12671        /* Validation guarantees the integrity item spec pointer is not NULL. */
12672        MLX5_ASSERT(spec != NULL);
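        /*
         * Per the rte_flow integrity item definition, levels 0 and 1
         * select the outermost headers and level 2 and above select the
         * inner ones.
         */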
12673        if (spec->level > 1) {
12674                integrity_items[1] = item;
12675                *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12676        } else {
12677                integrity_items[0] = item;
12678                *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12679        }
12680}
12681
12682/**
12683 * Prepares DV flow counter with aging configuration.
12684 * Gets it by index when exists, creates a new one when doesn't.
12685 *
12686 * @param[in] dev
12687 *   Pointer to rte_eth_dev structure.
12688 * @param[in] dev_flow
12689 *   Pointer to the mlx5_flow.
12690 * @param[in, out] flow
12691 *   Pointer to the sub flow.
12692 * @param[in] count
12693 *   Pointer to the counter action configuration.
12694 * @param[in] age
12695 *   Pointer to the aging action configuration.
12696 * @param[out] error
12697 *   Pointer to the error structure.
12698 *
12699 * @return
12700 *   Pointer to the counter, NULL otherwise.
12701 */
12702static struct mlx5_flow_counter *
12703flow_dv_prepare_counter(struct rte_eth_dev *dev,
12704                        struct mlx5_flow *dev_flow,
12705                        struct rte_flow *flow,
12706                        const struct rte_flow_action_count *count,
12707                        const struct rte_flow_action_age *age,
12708                        struct rte_flow_error *error)
12709{
12710        if (!flow->counter) {
12711                flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12712                                                                 count, age);
12713                if (!flow->counter) {
12714                        rte_flow_error_set(error, rte_errno,
12715                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12716                                           "cannot create counter object.");
12717                        return NULL;
12718                }
12719        }
12720        return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12721}
12722
12723/**
12724 * Release an ASO CT action via its owner device.
12725 *
12726 * @param[in] dev
12727 *   Pointer to the Ethernet device structure.
12728 * @param[in] idx
12729 *   Index of ASO CT action to release.
12730 *
12731 * @return
12732 *   0 when CT action was removed, otherwise the number of references.
12733 */
12734static inline int
12735flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12736{
12737        struct mlx5_priv *priv = dev->data->dev_private;
12738        struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12739        uint32_t ret;
12740        struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12741        enum mlx5_aso_ct_state state =
12742                        __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12743
12744        /* Cannot release when CT is in the ASO SQ. */
12745        if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12746                return -1;
12747        ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12748        if (!ret) {
12749                if (ct->dr_action_orig) {
12750#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12751                        claim_zero(mlx5_glue->destroy_flow_action
12752                                        (ct->dr_action_orig));
12753#endif
12754                        ct->dr_action_orig = NULL;
12755                }
12756                if (ct->dr_action_rply) {
12757#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12758                        claim_zero(mlx5_glue->destroy_flow_action
12759                                        (ct->dr_action_rply));
12760#endif
12761                        ct->dr_action_rply = NULL;
12762                }
12763                /* Clear the state to free, no need in 1st allocation. */
12764                MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12765                rte_spinlock_lock(&mng->ct_sl);
12766                LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12767                rte_spinlock_unlock(&mng->ct_sl);
12768        }
12769        return (int)ret;
12770}
12771
12772static inline int
12773flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12774                       struct rte_flow_error *error)
12775{
12776        uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12777        uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12778        struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12779        int ret;
12780
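        /*
         * The indirect CT action index encodes the owner port in its
         * upper bits, so the action is always released on the device
         * that created it, whichever port requests the release.
         */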
12781        MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12782        if (dev->data->dev_started != 1)
12783                return rte_flow_error_set(error, EAGAIN,
12784                                          RTE_FLOW_ERROR_TYPE_ACTION,
12785                                          NULL,
12786                                          "Indirect CT action cannot be destroyed when the port is stopped");
12787        ret = flow_dv_aso_ct_dev_release(owndev, idx);
12788        if (ret < 0)
12789                return rte_flow_error_set(error, EAGAIN,
12790                                          RTE_FLOW_ERROR_TYPE_ACTION,
12791                                          NULL,
12792                                          "Current state prevents indirect CT action from being destroyed");
12793        return ret;
12794}
12795
12796/**
12797 * Resize the ASO CT pools array by 64 pools.
12798 *
12799 * @param[in] dev
12800 *   Pointer to the Ethernet device structure.
12801 *
12802 * @return
12803 *   0 on success, otherwise negative errno value and rte_errno is set.
12804 */
12805static int
12806flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12807{
12808        struct mlx5_priv *priv = dev->data->dev_private;
12809        struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12810        void *old_pools = mng->pools;
12811        /* Magic number for now; it should be replaced by a macro. */
12812        uint32_t resize = mng->n + 64;
12813        uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12814        void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12815
12816        if (!pools) {
12817                rte_errno = ENOMEM;
12818                return -rte_errno;
12819        }
12820        rte_rwlock_write_lock(&mng->resize_rwl);
12821        /* The ASO SQ/QP was already initialized at startup. */
12822        if (old_pools) {
12823                /* Realloc could be an alternative choice. */
12824                rte_memcpy(pools, old_pools,
12825                           mng->n * sizeof(struct mlx5_aso_ct_pool *));
12826                mlx5_free(old_pools);
12827        }
12828        mng->n = resize;
12829        mng->pools = pools;
12830        rte_rwlock_write_unlock(&mng->resize_rwl);
12831        return 0;
12832}
12833
12834/**
12835 * Create and initialize a new ASO CT pool.
12836 *
12837 * @param[in] dev
12838 *   Pointer to the Ethernet device structure.
12839 * @param[out] ct_free
12840 *   Where to put the pointer of a new CT action.
12841 *
12842 * @return
12843 *   The CT actions pool pointer and @p ct_free is set on success,
12844 *   NULL otherwise and rte_errno is set.
12845 */
12846static struct mlx5_aso_ct_pool *
12847flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12848                       struct mlx5_aso_ct_action **ct_free)
12849{
12850        struct mlx5_priv *priv = dev->data->dev_private;
12851        struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12852        struct mlx5_aso_ct_pool *pool = NULL;
12853        struct mlx5_devx_obj *obj = NULL;
12854        uint32_t i;
12855        uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12856
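        /*
         * A single DevX object backs all CT actions of the pool; its
         * size is passed to the firmware as the log2 of the action count.
         */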
12857        obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12858                                                          priv->sh->cdev->pdn,
12859                                                          log_obj_size);
12860        if (!obj) {
12861                rte_errno = ENODATA;
12862                DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12863                return NULL;
12864        }
12865        pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12866        if (!pool) {
12867                rte_errno = ENOMEM;
12868                claim_zero(mlx5_devx_cmd_destroy(obj));
12869                return NULL;
12870        }
12871        pool->devx_obj = obj;
12872        pool->index = mng->next;
12873        /* Resize pools array if there is no room for the new pool in it. */
12874        if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12875                claim_zero(mlx5_devx_cmd_destroy(obj));
12876                mlx5_free(pool);
12877                return NULL;
12878        }
12879        mng->pools[pool->index] = pool;
12880        mng->next++;
12881        /* Assign the first action in the new pool, the rest go to free list. */
12882        *ct_free = &pool->actions[0];
12883        /* The lock is taken by the caller, so the list operation is safe here. */
12884        for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12885                /* refcnt is 0 when allocating the memory. */
12886                pool->actions[i].offset = i;
12887                LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12888        }
12889        return pool;
12890}
12891
12892/**
12893 * Allocate an ASO CT action from the free list.
12894 *
12895 * @param[in] dev
12896 *   Pointer to the Ethernet device structure.
12897 * @param[out] error
12898 *   Pointer to the error structure.
12899 *
12900 * @return
12901 *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12902 */
12903static uint32_t
12904flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12905{
12906        struct mlx5_priv *priv = dev->data->dev_private;
12907        struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12908        struct mlx5_aso_ct_action *ct = NULL;
12909        struct mlx5_aso_ct_pool *pool;
12910        uint8_t reg_c;
12911        uint32_t ct_idx;
12912
12913        MLX5_ASSERT(mng);
12914        if (!priv->sh->cdev->config.devx) {
12915                rte_errno = ENOTSUP;
12916                return 0;
12917        }
12918        /* Get a free CT action; if none is available, a new pool is created. */
12919        rte_spinlock_lock(&mng->ct_sl);
12920        ct = LIST_FIRST(&mng->free_cts);
12921        if (ct) {
12922                LIST_REMOVE(ct, next);
12923        } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12924                rte_spinlock_unlock(&mng->ct_sl);
12925                rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12926                                   NULL, "failed to create ASO CT pool");
12927                return 0;
12928        }
12929        rte_spinlock_unlock(&mng->ct_sl);
12930        pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12931        ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12932        /* 0: inactive, 1: created, 2+: used by flows. */
12933        __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12934        reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
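        /*
         * Every CT action carries two DR actions referencing the same ASO
         * object: one for the initiator (original) direction and one for
         * the responder (reply) direction.
         */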
12935        if (!ct->dr_action_orig) {
12936#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12937                ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12938                        (priv->sh->rx_domain, pool->devx_obj->obj,
12939                         ct->offset,
12940                         MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12941                         reg_c - REG_C_0);
12942#else
12943                RTE_SET_USED(reg_c);
12944#endif
12945                if (!ct->dr_action_orig) {
12946                        flow_dv_aso_ct_dev_release(dev, ct_idx);
12947                        rte_flow_error_set(error, rte_errno,
12948                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12949                                           "failed to create ASO CT action");
12950                        return 0;
12951                }
12952        }
12953        if (!ct->dr_action_rply) {
12954#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12955                ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12956                        (priv->sh->rx_domain, pool->devx_obj->obj,
12957                         ct->offset,
12958                         MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12959                         reg_c - REG_C_0);
12960#endif
12961                if (!ct->dr_action_rply) {
12962                        flow_dv_aso_ct_dev_release(dev, ct_idx);
12963                        rte_flow_error_set(error, rte_errno,
12964                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12965                                           "failed to create ASO CT action");
12966                        return 0;
12967                }
12968        }
12969        return ct_idx;
12970}
12971
12972/**
12973 * Create a conntrack object with context and actions by using ASO mechanism.
12974 *
12975 * @param[in] dev
12976 *   Pointer to rte_eth_dev structure.
12977 * @param[in] pro
12978 *   Pointer to conntrack information profile.
12979 * @param[out] error
12980 *   Pointer to the error structure.
12981 *
12982 * @return
12983 *   Index to conntrack object on success, 0 otherwise.
12984 */
12985static uint32_t
12986flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12987                                   const struct rte_flow_action_conntrack *pro,
12988                                   struct rte_flow_error *error)
12989{
12990        struct mlx5_priv *priv = dev->data->dev_private;
12991        struct mlx5_dev_ctx_shared *sh = priv->sh;
12992        struct mlx5_aso_ct_action *ct;
12993        uint32_t idx;
12994
12995        if (!sh->ct_aso_en)
12996                return rte_flow_error_set(error, ENOTSUP,
12997                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12998                                          "Connection tracking is not supported");
12999        idx = flow_dv_aso_ct_alloc(dev, error);
13000        if (!idx)
13001                return rte_flow_error_set(error, rte_errno,
13002                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13003                                          "Failed to allocate CT object");
13004        ct = flow_aso_ct_get_by_dev_idx(dev, idx);
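        /* Push the initial CT context to the hardware with an ASO WQE. */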
13005        if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
13006                return rte_flow_error_set(error, EBUSY,
13007                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13008                                          "Failed to update CT");
13009        ct->is_original = !!pro->is_original_dir;
13010        ct->peer = pro->peer_port;
13011        return idx;
13012}
13013
13014/**
13015 * Fill the flow with DV spec, lock free
13016 * (mutex should be acquired by caller).
13017 *
13018 * @param[in] dev
13019 *   Pointer to rte_eth_dev structure.
13020 * @param[in, out] dev_flow
13021 *   Pointer to the sub flow.
13022 * @param[in] attr
13023 *   Pointer to the flow attributes.
13024 * @param[in] items
13025 *   Pointer to the list of items.
13026 * @param[in] actions
13027 *   Pointer to the list of actions.
13028 * @param[out] error
13029 *   Pointer to the error structure.
13030 *
13031 * @return
13032 *   0 on success, a negative errno value otherwise and rte_errno is set.
13033 */
13034static int
13035flow_dv_translate(struct rte_eth_dev *dev,
13036                  struct mlx5_flow *dev_flow,
13037                  const struct rte_flow_attr *attr,
13038                  const struct rte_flow_item items[],
13039                  const struct rte_flow_action actions[],
13040                  struct rte_flow_error *error)
13041{
13042        struct mlx5_priv *priv = dev->data->dev_private;
13043        struct mlx5_sh_config *dev_conf = &priv->sh->config;
13044        struct rte_flow *flow = dev_flow->flow;
13045        struct mlx5_flow_handle *handle = dev_flow->handle;
13046        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13047        struct mlx5_flow_rss_desc *rss_desc;
13048        uint64_t item_flags = 0;
13049        uint64_t last_item = 0;
13050        uint64_t action_flags = 0;
13051        struct mlx5_flow_dv_matcher matcher = {
13052                .mask = {
13053                        .size = sizeof(matcher.mask.buf),
13054                },
13055        };
13056        int actions_n = 0;
13057        bool actions_end = false;
13058        union {
13059                struct mlx5_flow_dv_modify_hdr_resource res;
13060                uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
13061                            sizeof(struct mlx5_modification_cmd) *
13062                            (MLX5_MAX_MODIFY_NUM + 1)];
13063        } mhdr_dummy;
13064        struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
13065        const struct rte_flow_action_count *count = NULL;
13066        const struct rte_flow_action_age *non_shared_age = NULL;
13067        union flow_dv_attr flow_attr = { .attr = 0 };
13068        uint32_t tag_be;
13069        union mlx5_flow_tbl_key tbl_key;
13070        uint32_t modify_action_position = UINT32_MAX;
13071        void *match_mask = matcher.mask.buf;
13072        void *match_value = dev_flow->dv.value.buf;
13073        uint8_t next_protocol = 0xff;
13074        struct rte_vlan_hdr vlan = { 0 };
13075        struct mlx5_flow_dv_dest_array_resource mdest_res;
13076        struct mlx5_flow_dv_sample_resource sample_res;
13077        void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13078        const struct rte_flow_action_sample *sample = NULL;
13079        struct mlx5_flow_sub_actions_list *sample_act;
13080        uint32_t sample_act_pos = UINT32_MAX;
13081        uint32_t age_act_pos = UINT32_MAX;
13082        uint32_t num_of_dest = 0;
13083        int tmp_actions_n = 0;
13084        uint32_t table;
13085        int ret = 0;
13086        const struct mlx5_flow_tunnel *tunnel = NULL;
13087        struct flow_grp_info grp_info = {
13088                .external = !!dev_flow->external,
13089                .transfer = !!attr->transfer,
13090                .fdb_def_rule = !!priv->fdb_def_rule,
13091                .skip_scale = dev_flow->skip_scale &
13092                        (1 << MLX5_SCALE_FLOW_GROUP_BIT),
13093                .std_tbl_fix = true,
13094        };
13095        const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
13096        const struct rte_flow_item *tunnel_item = NULL;
13097        const struct rte_flow_item *gre_item = NULL;
13098
13099        if (!wks)
13100                return rte_flow_error_set(error, ENOMEM,
13101                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13102                                          NULL,
13103                                          "failed to push flow workspace");
13104        rss_desc = &wks->rss_desc;
13105        memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
13106        memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
13107        mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
13108                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
13109        /* Update normal path action resources in the last index of the array. */
13110        sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
13111        if (is_tunnel_offload_active(dev)) {
13112                if (dev_flow->tunnel) {
13113                        RTE_VERIFY(dev_flow->tof_type ==
13114                                   MLX5_TUNNEL_OFFLOAD_MISS_RULE);
13115                        tunnel = dev_flow->tunnel;
13116                } else {
13117                        tunnel = mlx5_get_tof(items, actions,
13118                                              &dev_flow->tof_type);
13119                        dev_flow->tunnel = tunnel;
13120                }
13121                grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
13122                                        (dev, attr, tunnel, dev_flow->tof_type);
13123        }
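        /*
         * attr->group is a logical group number; translate it to the
         * actual hardware table, taking the tunnel offload mapping and
         * the scale/skip flags from grp_info into account.
         */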
13126        ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
13127                                       &grp_info, error);
13128        if (ret)
13129                return ret;
13130        dev_flow->dv.group = table;
13131        if (attr->transfer)
13132                mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
13133        /* The number of actions must be reset to 0 in case of a dirty stack. */
13134        mhdr_res->actions_num = 0;
13135        if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
13136                /*
13137                 * Do not add a decap action if the match rule drops the
13138                 * packet; HW rejects rules with decap & drop.
13139                 *
13140                 * If a tunnel match rule was inserted before the matching
13141                 * tunnel set rule, the flow table used in the match rule
13142                 * must be registered. The current implementation handles
13143                 * that in flow_dv_match_register() at the function end.
13144                 */
13145                bool add_decap = true;
13146                const struct rte_flow_action *ptr = actions;
13147
13148                for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
13149                        if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
13150                                add_decap = false;
13151                                break;
13152                        }
13153                }
13154                if (add_decap) {
13155                        if (flow_dv_create_action_l2_decap(dev, dev_flow,
13156                                                           attr->transfer,
13157                                                           error))
13158                                return -rte_errno;
13159                        dev_flow->dv.actions[actions_n++] =
13160                                        dev_flow->dv.encap_decap->action;
13161                        action_flags |= MLX5_FLOW_ACTION_DECAP;
13162                }
13163        }
13164        for (; !actions_end ; actions++) {
13165                const struct rte_flow_action_queue *queue;
13166                const struct rte_flow_action_rss *rss;
13167                const struct rte_flow_action *action = actions;
13168                const uint8_t *rss_key;
13169                struct mlx5_flow_tbl_resource *tbl;
13170                struct mlx5_aso_age_action *age_act;
13171                struct mlx5_flow_counter *cnt_act;
13172                uint32_t port_id = 0;
13173                struct mlx5_flow_dv_port_id_action_resource port_id_resource;
13174                int action_type = actions->type;
13175                const struct rte_flow_action *found_action = NULL;
13176                uint32_t jump_group = 0;
13177                uint32_t owner_idx;
13178                struct mlx5_aso_ct_action *ct;
13179
13180                if (!mlx5_flow_os_action_supported(action_type))
13181                        return rte_flow_error_set(error, ENOTSUP,
13182                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13183                                                  actions,
13184                                                  "action not supported");
13185                switch (action_type) {
13186                case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
13187                        action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
13188                        break;
13189                case RTE_FLOW_ACTION_TYPE_VOID:
13190                        break;
13191                case RTE_FLOW_ACTION_TYPE_PORT_ID:
13192                case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
13193                        if (flow_dv_translate_action_port_id(dev, action,
13194                                                             &port_id, error))
13195                                return -rte_errno;
13196                        port_id_resource.port_id = port_id;
13197                        MLX5_ASSERT(!handle->rix_port_id_action);
13198                        if (flow_dv_port_id_action_resource_register
13199                            (dev, &port_id_resource, dev_flow, error))
13200                                return -rte_errno;
13201                        dev_flow->dv.actions[actions_n++] =
13202                                        dev_flow->dv.port_id_action->action;
13203                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
13204                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
13205                        sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
13206                        num_of_dest++;
13207                        break;
13208                case RTE_FLOW_ACTION_TYPE_FLAG:
13209                        action_flags |= MLX5_FLOW_ACTION_FLAG;
13210                        wks->mark = 1;
13211                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
13212                                struct rte_flow_action_mark mark = {
13213                                        .id = MLX5_FLOW_MARK_DEFAULT,
13214                                };
13215
13216                                if (flow_dv_convert_action_mark(dev, &mark,
13217                                                                mhdr_res,
13218                                                                error))
13219                                        return -rte_errno;
13220                                action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
13221                                break;
13222                        }
13223                        tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
13224                        /*
13225                         * Only one FLAG or MARK action is supported per device
13226                         * flow right now, so the pointer to the tag resource
13227                         * must be zero before registration.
13228                         */
13229                        MLX5_ASSERT(!handle->dvh.rix_tag);
13230                        if (flow_dv_tag_resource_register(dev, tag_be,
13231                                                          dev_flow, error))
13232                                return -rte_errno;
13233                        MLX5_ASSERT(dev_flow->dv.tag_resource);
13234                        dev_flow->dv.actions[actions_n++] =
13235                                        dev_flow->dv.tag_resource->action;
13236                        break;
13237                case RTE_FLOW_ACTION_TYPE_MARK:
13238                        action_flags |= MLX5_FLOW_ACTION_MARK;
13239                        wks->mark = 1;
13240                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
13241                                const struct rte_flow_action_mark *mark =
13242                                        (const struct rte_flow_action_mark *)
13243                                                actions->conf;
13244
13245                                if (flow_dv_convert_action_mark(dev, mark,
13246                                                                mhdr_res,
13247                                                                error))
13248                                        return -rte_errno;
13249                                action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
13250                                break;
13251                        }
13252                        /* Fall-through */
13253                case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
13254                        /* Legacy (non-extended metadata) MARK action. */
13255                        tag_be = mlx5_flow_mark_set
13256                              (((const struct rte_flow_action_mark *)
13257                               (actions->conf))->id);
13258                        MLX5_ASSERT(!handle->dvh.rix_tag);
13259                        if (flow_dv_tag_resource_register(dev, tag_be,
13260                                                          dev_flow, error))
13261                                return -rte_errno;
13262                        MLX5_ASSERT(dev_flow->dv.tag_resource);
13263                        dev_flow->dv.actions[actions_n++] =
13264                                        dev_flow->dv.tag_resource->action;
13265                        break;
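                             /*
                              * Illustrative example (testpmd-style syntax, not
                              * part of the driver): a rule such as
                              *   flow create 0 ingress pattern eth / end
                              *     actions mark id 42 / queue index 0 / end
                              * takes the legacy path above when dv_xmeta_en is
                              * MLX5_XMETA_MODE_LEGACY, turning the mark ID into
                              * a tag action; otherwise the mark is converted
                              * into a header-modify command via
                              * flow_dv_convert_action_mark().
                              */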
13266                case RTE_FLOW_ACTION_TYPE_SET_META:
13267                        if (flow_dv_convert_action_set_meta
13268                                (dev, mhdr_res, attr,
13269                                 (const struct rte_flow_action_set_meta *)
13270                                  actions->conf, error))
13271                                return -rte_errno;
13272                        action_flags |= MLX5_FLOW_ACTION_SET_META;
13273                        break;
13274                case RTE_FLOW_ACTION_TYPE_SET_TAG:
13275                        if (flow_dv_convert_action_set_tag
13276                                (dev, mhdr_res,
13277                                 (const struct rte_flow_action_set_tag *)
13278                                  actions->conf, error))
13279                                return -rte_errno;
13280                        action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13281                        break;
13282                case RTE_FLOW_ACTION_TYPE_DROP:
13283                        action_flags |= MLX5_FLOW_ACTION_DROP;
13284                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
13285                        break;
13286                case RTE_FLOW_ACTION_TYPE_QUEUE:
13287                        queue = actions->conf;
13288                        rss_desc->queue_num = 1;
13289                        rss_desc->queue[0] = queue->index;
13290                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
13291                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
13292                        sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
13293                        num_of_dest++;
13294                        break;
13295                case RTE_FLOW_ACTION_TYPE_RSS:
13296                        rss = actions->conf;
13297                        memcpy(rss_desc->queue, rss->queue,
13298                               rss->queue_num * sizeof(uint16_t));
13299                        rss_desc->queue_num = rss->queue_num;
13300                        /* NULL RSS key indicates default RSS key. */
13301                        rss_key = !rss->key ? rss_hash_default_key : rss->key;
13302                        memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
13303                        /*
13304                         * rss->level and rss->types should be set in advance
13305                         * when expanding items for RSS.
13306                         */
13307                        action_flags |= MLX5_FLOW_ACTION_RSS;
13308                        dev_flow->handle->fate_action = rss_desc->shared_rss ?
13309                                MLX5_FLOW_FATE_SHARED_RSS :
13310                                MLX5_FLOW_FATE_QUEUE;
13311                        break;
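                             /*
                              * Illustrative example (testpmd-style syntax): for
                              * a rule such as
                              *   flow create 0 ingress pattern eth / ipv4 / udp / end
                              *     actions rss queues 0 1 2 3 end types ipv4-udp end / end
                              * only rss_desc is filled in here; the hash fields
                              * are computed after item translation (see
                              * flow_dv_hashfields_set() below).
                              */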
13312                case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
13313                        owner_idx = (uint32_t)(uintptr_t)action->conf;
13314                        age_act = flow_aso_age_get_by_idx(dev, owner_idx);
13315                        if (flow->age == 0) {
13316                                flow->age = owner_idx;
13317                                __atomic_fetch_add(&age_act->refcnt, 1,
13318                                                   __ATOMIC_RELAXED);
13319                        }
13320                        age_act_pos = actions_n++;
13321                        action_flags |= MLX5_FLOW_ACTION_AGE;
13322                        break;
13323                case RTE_FLOW_ACTION_TYPE_AGE:
13324                        non_shared_age = action->conf;
13325                        age_act_pos = actions_n++;
13326                        action_flags |= MLX5_FLOW_ACTION_AGE;
13327                        break;
13328                case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
13329                        owner_idx = (uint32_t)(uintptr_t)action->conf;
13330                        cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
13331                                                             NULL);
13332                        MLX5_ASSERT(cnt_act != NULL);
13333                        /*
13334                         * When creating a meter drop flow in the drop table,
13335                         * the counter must not overwrite the rte flow counter.
13336                         */
13337                        if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13338                            dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
13339                                dev_flow->dv.actions[actions_n++] =
13340                                                        cnt_act->action;
13341                        } else {
13342                                if (flow->counter == 0) {
13343                                        flow->counter = owner_idx;
13344                                        __atomic_fetch_add
13345                                                (&cnt_act->shared_info.refcnt,
13346                                                 1, __ATOMIC_RELAXED);
13347                                }
13348                                /* Save the information first; it is applied later. */
13349                                action_flags |= MLX5_FLOW_ACTION_COUNT;
13350                        }
13351                        break;
13352                case RTE_FLOW_ACTION_TYPE_COUNT:
13353                        if (!priv->sh->cdev->config.devx) {
13354                                return rte_flow_error_set
13355                                              (error, ENOTSUP,
13356                                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13357                                               NULL,
13358                                               "count action not supported");
13359                        }
13360                        /* Save the information first; it is applied later. */
13361                        count = action->conf;
13362                        action_flags |= MLX5_FLOW_ACTION_COUNT;
13363                        break;
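                             /*
                              * Note: for both COUNT paths the counter object
                              * itself is only created at the END action via
                              * flow_dv_prepare_counter(), so all sub-flows of
                              * the same rte_flow share a single HW counter.
                              */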
13364                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
13365                        dev_flow->dv.actions[actions_n++] =
13366                                                priv->sh->pop_vlan_action;
13367                        action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
13368                        break;
13369                case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
13370                        if (!(action_flags &
13371                              MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
13372                                flow_dev_get_vlan_info_from_items(items, &vlan);
13373                        vlan.eth_proto = rte_be_to_cpu_16
13374                             ((((const struct rte_flow_action_of_push_vlan *)
13375                                                   actions->conf)->ethertype));
13376                        found_action = mlx5_flow_find_action
13377                                        (actions + 1,
13378                                         RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
13379                        if (found_action)
13380                                mlx5_update_vlan_vid_pcp(found_action, &vlan);
13381                        found_action = mlx5_flow_find_action
13382                                        (actions + 1,
13383                                         RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
13384                        if (found_action)
13385                                mlx5_update_vlan_vid_pcp(found_action, &vlan);
13386                        if (flow_dv_create_action_push_vlan
13387                                            (dev, attr, &vlan, dev_flow, error))
13388                                return -rte_errno;
13389                        dev_flow->dv.actions[actions_n++] =
13390                                        dev_flow->dv.push_vlan_res->action;
13391                        action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
13392                        break;
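                             /*
                              * The two lookaheads above let the pushed VLAN
                              * header carry the final VID/PCP in one DR action,
                              * so the subsequent OF_SET_VLAN_VID/PCP cases
                              * become no-ops for this flow.
                              */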
13393                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
13394                        /* The OF_PUSH_VLAN action has already handled this action. */
13395                        MLX5_ASSERT(action_flags &
13396                                    MLX5_FLOW_ACTION_OF_PUSH_VLAN);
13397                        break;
13398                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
13399                        if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
13400                                break;
13401                        flow_dev_get_vlan_info_from_items(items, &vlan);
13402                        mlx5_update_vlan_vid_pcp(actions, &vlan);
13403                        /* With no VLAN push, this is a modify-header action. */
13404                        if (flow_dv_convert_action_modify_vlan_vid
13405                                                (mhdr_res, actions, error))
13406                                return -rte_errno;
13407                        action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
13408                        break;
13409                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13410                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13411                        if (flow_dv_create_action_l2_encap(dev, actions,
13412                                                           dev_flow,
13413                                                           attr->transfer,
13414                                                           error))
13415                                return -rte_errno;
13416                        dev_flow->dv.actions[actions_n++] =
13417                                        dev_flow->dv.encap_decap->action;
13418                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
13419                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13420                                sample_act->action_flags |=
13421                                                        MLX5_FLOW_ACTION_ENCAP;
13422                        break;
13423                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
13424                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
13425                        if (flow_dv_create_action_l2_decap(dev, dev_flow,
13426                                                           attr->transfer,
13427                                                           error))
13428                                return -rte_errno;
13429                        dev_flow->dv.actions[actions_n++] =
13430                                        dev_flow->dv.encap_decap->action;
13431                        action_flags |= MLX5_FLOW_ACTION_DECAP;
13432                        break;
13433                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13434                        /* Handle encap with preceding decap. */
13435                        if (action_flags & MLX5_FLOW_ACTION_DECAP) {
13436                                if (flow_dv_create_action_raw_encap
13437                                        (dev, actions, dev_flow, attr, error))
13438                                        return -rte_errno;
13439                                dev_flow->dv.actions[actions_n++] =
13440                                        dev_flow->dv.encap_decap->action;
13441                        } else {
13442                                /* Handle encap without preceding decap. */
13443                                if (flow_dv_create_action_l2_encap
13444                                    (dev, actions, dev_flow, attr->transfer,
13445                                     error))
13446                                        return -rte_errno;
13447                                dev_flow->dv.actions[actions_n++] =
13448                                        dev_flow->dv.encap_decap->action;
13449                        }
13450                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
13451                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13452                                sample_act->action_flags |=
13453                                                        MLX5_FLOW_ACTION_ENCAP;
13454                        break;
13455                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
13456                        while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
13457                                ;
13458                        if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
13459                                if (flow_dv_create_action_l2_decap
13460                                    (dev, dev_flow, attr->transfer, error))
13461                                        return -rte_errno;
13462                                dev_flow->dv.actions[actions_n++] =
13463                                        dev_flow->dv.encap_decap->action;
13464                        }
13465                        /* If decap is followed by encap, handle it at encap. */
13466                        action_flags |= MLX5_FLOW_ACTION_DECAP;
13467                        break;
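                             /*
                              * The VOID-skipping lookahead above merges a decap
                              * immediately followed by a raw encap into a single
                              * packet-reformat action, created later by the
                              * RAW_ENCAP case.
                              */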
13468                case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
13469                        dev_flow->dv.actions[actions_n++] =
13470                                (void *)(uintptr_t)action->conf;
13471                        action_flags |= MLX5_FLOW_ACTION_JUMP;
13472                        break;
13473                case RTE_FLOW_ACTION_TYPE_JUMP:
13474                        jump_group = ((const struct rte_flow_action_jump *)
13475                                                        action->conf)->group;
13476                        grp_info.std_tbl_fix = 0;
13477                        if (dev_flow->skip_scale &
13478                                (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
13479                                grp_info.skip_scale = 1;
13480                        else
13481                                grp_info.skip_scale = 0;
13482                        ret = mlx5_flow_group_to_table(dev, tunnel,
13483                                                       jump_group,
13484                                                       &table,
13485                                                       &grp_info, error);
13486                        if (ret)
13487                                return ret;
13488                        tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
13489                                                       attr->transfer,
13490                                                       !!dev_flow->external,
13491                                                       tunnel, jump_group, 0,
13492                                                       0, error);
13493                        if (!tbl)
13494                                return rte_flow_error_set
13495                                                (error, errno,
13496                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13497                                                 NULL,
13498                                                 "cannot create jump action.");
13499                        if (flow_dv_jump_tbl_resource_register
13500                            (dev, tbl, dev_flow, error)) {
13501                                flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13502                                return rte_flow_error_set
13503                                                (error, errno,
13504                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13505                                                 NULL,
13506                                                 "cannot create jump action.");
13507                        }
13508                        dev_flow->dv.actions[actions_n++] =
13509                                        dev_flow->dv.jump->action;
13510                        action_flags |= MLX5_FLOW_ACTION_JUMP;
13511                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
13512                        sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
13513                        num_of_dest++;
13514                        break;
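                             /*
                              * Illustrative example (testpmd-style syntax): for
                              *   flow create 0 ingress group 0 pattern eth / end
                              *     actions jump group 1 / end
                              * jump_group is 1 and mlx5_flow_group_to_table()
                              * maps it to a hardware table index, honoring
                              * tunnel-offload and group-scale adjustments.
                              */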
13515                case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
13516                case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
13517                        if (flow_dv_convert_action_modify_mac
13518                                        (mhdr_res, actions, error))
13519                                return -rte_errno;
13520                        action_flags |= actions->type ==
13521                                        RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
13522                                        MLX5_FLOW_ACTION_SET_MAC_SRC :
13523                                        MLX5_FLOW_ACTION_SET_MAC_DST;
13524                        break;
13525                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
13526                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
13527                        if (flow_dv_convert_action_modify_ipv4
13528                                        (mhdr_res, actions, error))
13529                                return -rte_errno;
13530                        action_flags |= actions->type ==
13531                                        RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
13532                                        MLX5_FLOW_ACTION_SET_IPV4_SRC :
13533                                        MLX5_FLOW_ACTION_SET_IPV4_DST;
13534                        break;
13535                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
13536                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
13537                        if (flow_dv_convert_action_modify_ipv6
13538                                        (mhdr_res, actions, error))
13539                                return -rte_errno;
13540                        action_flags |= actions->type ==
13541                                        RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
13542                                        MLX5_FLOW_ACTION_SET_IPV6_SRC :
13543                                        MLX5_FLOW_ACTION_SET_IPV6_DST;
13544                        break;
13545                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13546                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13547                        if (flow_dv_convert_action_modify_tp
13548                                        (mhdr_res, actions, items,
13549                                         &flow_attr, dev_flow, !!(action_flags &
13550                                         MLX5_FLOW_ACTION_DECAP), error))
13551                                return -rte_errno;
13552                        action_flags |= actions->type ==
13553                                        RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13554                                        MLX5_FLOW_ACTION_SET_TP_SRC :
13555                                        MLX5_FLOW_ACTION_SET_TP_DST;
13556                        break;
13557                case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13558                        if (flow_dv_convert_action_modify_dec_ttl
13559                                        (mhdr_res, items, &flow_attr, dev_flow,
13560                                         !!(action_flags &
13561                                         MLX5_FLOW_ACTION_DECAP), error))
13562                                return -rte_errno;
13563                        action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13564                        break;
13565                case RTE_FLOW_ACTION_TYPE_SET_TTL:
13566                        if (flow_dv_convert_action_modify_ttl
13567                                        (mhdr_res, actions, items, &flow_attr,
13568                                         dev_flow, !!(action_flags &
13569                                         MLX5_FLOW_ACTION_DECAP), error))
13570                                return -rte_errno;
13571                        action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13572                        break;
13573                case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13574                case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13575                        if (flow_dv_convert_action_modify_tcp_seq
13576                                        (mhdr_res, actions, error))
13577                                return -rte_errno;
13578                        action_flags |= actions->type ==
13579                                        RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13580                                        MLX5_FLOW_ACTION_INC_TCP_SEQ :
13581                                        MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13582                        break;
13583
13584                case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13585                case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13586                        if (flow_dv_convert_action_modify_tcp_ack
13587                                        (mhdr_res, actions, error))
13588                                return -rte_errno;
13589                        action_flags |= actions->type ==
13590                                        RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13591                                        MLX5_FLOW_ACTION_INC_TCP_ACK :
13592                                        MLX5_FLOW_ACTION_DEC_TCP_ACK;
13593                        break;
13594                case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13595                        if (flow_dv_convert_action_set_reg
13596                                        (mhdr_res, actions, error))
13597                                return -rte_errno;
13598                        action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13599                        break;
13600                case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13601                        if (flow_dv_convert_action_copy_mreg
13602                                        (dev, mhdr_res, actions, error))
13603                                return -rte_errno;
13604                        action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13605                        break;
13606                case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13607                        action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13608                        dev_flow->handle->fate_action =
13609                                        MLX5_FLOW_FATE_DEFAULT_MISS;
13610                        break;
13611                case RTE_FLOW_ACTION_TYPE_METER:
13612                        if (!wks->fm)
13613                                return rte_flow_error_set(error, rte_errno,
13614                                        RTE_FLOW_ERROR_TYPE_ACTION,
13615                                        NULL, "Failed to get meter in flow.");
13616                        /* Set the meter action. */
13617                        dev_flow->dv.actions[actions_n++] =
13618                                wks->fm->meter_action_g;
13619                        action_flags |= MLX5_FLOW_ACTION_METER;
13620                        break;
13621                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13622                        if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13623                                                              actions, error))
13624                                return -rte_errno;
13625                        action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13626                        break;
13627                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13628                        if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13629                                                              actions, error))
13630                                return -rte_errno;
13631                        action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13632                        break;
13633                case RTE_FLOW_ACTION_TYPE_SAMPLE:
13634                        sample_act_pos = actions_n;
13635                        sample = (const struct rte_flow_action_sample *)
13636                                 action->conf;
13637                        actions_n++;
13638                        action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13639                        /* Put the encap action into the group when used with a port ID. */
13640                        if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13641                            (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13642                                sample_act->action_flags |=
13643                                                        MLX5_FLOW_ACTION_ENCAP;
13644                        break;
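                             /*
                              * Only the slot position is reserved here; the
                              * sample resource itself is translated after the
                              * item loop (see flow_dv_translate_action_sample()
                              * below), once the hash fields are known.
                              */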
13645                case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13646                        if (flow_dv_convert_action_modify_field
13647                                        (dev, mhdr_res, actions, attr, error))
13648                                return -rte_errno;
13649                        action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13650                        break;
13651                case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13652                        owner_idx = (uint32_t)(uintptr_t)action->conf;
13653                        ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13654                        if (!ct)
13655                                return rte_flow_error_set(error, EINVAL,
13656                                                RTE_FLOW_ERROR_TYPE_ACTION,
13657                                                NULL,
13658                                                "Failed to get CT object.");
13659                        if (mlx5_aso_ct_available(priv->sh, ct))
13660                                return rte_flow_error_set(error, rte_errno,
13661                                                RTE_FLOW_ERROR_TYPE_ACTION,
13662                                                NULL,
13663                                                "CT is unavailable.");
13664                        if (ct->is_original)
13665                                dev_flow->dv.actions[actions_n] =
13666                                                        ct->dr_action_orig;
13667                        else
13668                                dev_flow->dv.actions[actions_n] =
13669                                                        ct->dr_action_rply;
13670                        if (flow->ct == 0) {
13671                                flow->indirect_type =
13672                                                MLX5_INDIRECT_ACTION_TYPE_CT;
13673                                flow->ct = owner_idx;
13674                                __atomic_fetch_add(&ct->refcnt, 1,
13675                                                   __ATOMIC_RELAXED);
13676                        }
13677                        actions_n++;
13678                        action_flags |= MLX5_FLOW_ACTION_CT;
13679                        break;
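                             /*
                              * Note: the CT object provides two direction-
                              * specific DR actions; is_original selects the one
                              * matching the flow direction, and the object is
                              * reference-counted on its first use by this
                              * rte_flow.
                              */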
13680                case RTE_FLOW_ACTION_TYPE_END:
13681                        actions_end = true;
13682                        if (mhdr_res->actions_num) {
13683                                /* Create the modify-header action if needed. */
13684                                if (flow_dv_modify_hdr_resource_register
13685                                        (dev, mhdr_res, dev_flow, error))
13686                                        return -rte_errno;
13687                                dev_flow->dv.actions[modify_action_position] =
13688                                        handle->dvh.modify_hdr->action;
13689                        }
13690                        /*
13691                         * Handle the AGE and COUNT actions with a single HW
13692                         * counter when they are not shared.
13693                         */
13694                        if (action_flags & MLX5_FLOW_ACTION_AGE) {
13695                                if ((non_shared_age && count) ||
13696                                    !flow_hit_aso_supported(priv->sh, attr)) {
13697                                        /* Implement aging by counters. */
13698                                        cnt_act = flow_dv_prepare_counter
13699                                                                (dev, dev_flow,
13700                                                                 flow, count,
13701                                                                 non_shared_age,
13702                                                                 error);
13703                                        if (!cnt_act)
13704                                                return -rte_errno;
13705                                        dev_flow->dv.actions[age_act_pos] =
13706                                                                cnt_act->action;
13707                                        break;
13708                                }
13709                                if (!flow->age && non_shared_age) {
13710                                        flow->age = flow_dv_aso_age_alloc
13711                                                                (dev, error);
13712                                        if (!flow->age)
13713                                                return -rte_errno;
13714                                        flow_dv_aso_age_params_init
13715                                                    (dev, flow->age,
13716                                                     non_shared_age->context ?
13717                                                     non_shared_age->context :
13718                                                     (void *)(uintptr_t)
13719                                                     (dev_flow->flow_idx),
13720                                                     non_shared_age->timeout);
13721                                }
13722                                age_act = flow_aso_age_get_by_idx(dev,
13723                                                                  flow->age);
13724                                dev_flow->dv.actions[age_act_pos] =
13725                                                             age_act->dr_action;
13726                        }
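                             /*
                              * Fallback note: when ASO hit aging is not
                              * supported, or a non-shared AGE is combined with
                              * COUNT, aging falls back to a plain HW counter
                              * created by flow_dv_prepare_counter().
                              */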
13727                        if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13728                                /*
13729                                 * Create one count action, to be used
13730                                 * by all sub-flows.
13731                                 */
13732                                cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13733                                                                  flow, count,
13734                                                                  NULL, error);
13735                                if (!cnt_act)
13736                                        return -rte_errno;
13737                                dev_flow->dv.actions[actions_n++] =
13738                                                                cnt_act->action;
13739                        }
13740                default:
13741                        break;
13742                }
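                     /*
                      * Reserve one slot in dv.actions[] at the position of the
                      * first header-modify action; the combined modify-header
                      * object registered at the END action is stored there.
                      */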
13743                if (mhdr_res->actions_num &&
13744                    modify_action_position == UINT32_MAX)
13745                        modify_action_position = actions_n++;
13746        }
13747        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13748                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13749                int item_type = items->type;
13750
13751                if (!mlx5_flow_os_item_supported(item_type))
13752                        return rte_flow_error_set(error, ENOTSUP,
13753                                                  RTE_FLOW_ERROR_TYPE_ITEM,
13754                                                  NULL, "item not supported");
13755                switch (item_type) {
13756                case RTE_FLOW_ITEM_TYPE_ESP:
13757                        flow_dv_translate_item_esp(match_mask, match_value,
13758                                                   items, tunnel);
13759                        last_item = MLX5_FLOW_ITEM_ESP;
13760                        break;
13761                case RTE_FLOW_ITEM_TYPE_PORT_ID:
13762                        flow_dv_translate_item_port_id
13763                                (dev, match_mask, match_value, items, attr);
13764                        last_item = MLX5_FLOW_ITEM_PORT_ID;
13765                        break;
13766                case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
13767                        flow_dv_translate_item_represented_port
13768                                (dev, match_mask, match_value, items, attr);
13769                        last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
13770                        break;
13771                case RTE_FLOW_ITEM_TYPE_ETH:
13772                        flow_dv_translate_item_eth(match_mask, match_value,
13773                                                   items, tunnel,
13774                                                   dev_flow->dv.group);
13775                        matcher.priority = action_flags &
13776                                        MLX5_FLOW_ACTION_DEFAULT_MISS &&
13777                                        !dev_flow->external ?
13778                                        MLX5_PRIORITY_MAP_L3 :
13779                                        MLX5_PRIORITY_MAP_L2;
13780                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13781                                             MLX5_FLOW_LAYER_OUTER_L2;
13782                        break;
13783                case RTE_FLOW_ITEM_TYPE_VLAN:
13784                        flow_dv_translate_item_vlan(dev_flow,
13785                                                    match_mask, match_value,
13786                                                    items, tunnel,
13787                                                    dev_flow->dv.group);
13788                        matcher.priority = MLX5_PRIORITY_MAP_L2;
13789                        last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13790                                              MLX5_FLOW_LAYER_INNER_VLAN) :
13791                                             (MLX5_FLOW_LAYER_OUTER_L2 |
13792                                              MLX5_FLOW_LAYER_OUTER_VLAN);
13793                        break;
13794                case RTE_FLOW_ITEM_TYPE_IPV4:
13795                        mlx5_flow_tunnel_ip_check(items, next_protocol,
13796                                                  &item_flags, &tunnel);
13797                        flow_dv_translate_item_ipv4(match_mask, match_value,
13798                                                    items, tunnel,
13799                                                    dev_flow->dv.group);
13800                        matcher.priority = MLX5_PRIORITY_MAP_L3;
13801                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13802                                             MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13803                        if (items->mask != NULL &&
13804                            ((const struct rte_flow_item_ipv4 *)
13805                             items->mask)->hdr.next_proto_id) {
13806                                next_protocol =
13807                                        ((const struct rte_flow_item_ipv4 *)
13808                                         (items->spec))->hdr.next_proto_id;
13809                                next_protocol &=
13810                                        ((const struct rte_flow_item_ipv4 *)
13811                                         (items->mask))->hdr.next_proto_id;
13812                        } else {
13813                                /* Reset for inner layer. */
13814                                next_protocol = 0xff;
13815                        }
13816                        break;
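                             /*
                              * next_protocol carries spec & mask of the L3
                              * next-header field so that a following inner IP
                              * item can be classified as tunneled by
                              * mlx5_flow_tunnel_ip_check().
                              */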
13817                case RTE_FLOW_ITEM_TYPE_IPV6:
13818                        mlx5_flow_tunnel_ip_check(items, next_protocol,
13819                                                  &item_flags, &tunnel);
13820                        flow_dv_translate_item_ipv6(match_mask, match_value,
13821                                                    items, tunnel,
13822                                                    dev_flow->dv.group);
13823                        matcher.priority = MLX5_PRIORITY_MAP_L3;
13824                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13825                                             MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13826                        if (items->mask != NULL &&
13827                            ((const struct rte_flow_item_ipv6 *)
13828                             items->mask)->hdr.proto) {
13829                                next_protocol =
13830                                        ((const struct rte_flow_item_ipv6 *)
13831                                         items->spec)->hdr.proto;
13832                                next_protocol &=
13833                                        ((const struct rte_flow_item_ipv6 *)
13834                                         items->mask)->hdr.proto;
13835                        } else {
13836                                /* Reset for inner layer. */
13837                                next_protocol = 0xff;
13838                        }
13839                        break;
13840                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13841                        flow_dv_translate_item_ipv6_frag_ext(match_mask,
13842                                                             match_value,
13843                                                             items, tunnel);
13844                        last_item = tunnel ?
13845                                        MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13846                                        MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13847                        if (items->mask != NULL &&
13848                            ((const struct rte_flow_item_ipv6_frag_ext *)
13849                             items->mask)->hdr.next_header) {
13850                                next_protocol =
13851                                ((const struct rte_flow_item_ipv6_frag_ext *)
13852                                 items->spec)->hdr.next_header;
13853                                next_protocol &=
13854                                ((const struct rte_flow_item_ipv6_frag_ext *)
13855                                 items->mask)->hdr.next_header;
13856                        } else {
13857                                /* Reset for inner layer. */
13858                                next_protocol = 0xff;
13859                        }
13860                        break;
13861                case RTE_FLOW_ITEM_TYPE_TCP:
13862                        flow_dv_translate_item_tcp(match_mask, match_value,
13863                                                   items, tunnel);
13864                        matcher.priority = MLX5_PRIORITY_MAP_L4;
13865                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13866                                             MLX5_FLOW_LAYER_OUTER_L4_TCP;
13867                        break;
13868                case RTE_FLOW_ITEM_TYPE_UDP:
13869                        flow_dv_translate_item_udp(match_mask, match_value,
13870                                                   items, tunnel);
13871                        matcher.priority = MLX5_PRIORITY_MAP_L4;
13872                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13873                                             MLX5_FLOW_LAYER_OUTER_L4_UDP;
13874                        break;
13875                case RTE_FLOW_ITEM_TYPE_GRE:
13876                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13877                        last_item = MLX5_FLOW_LAYER_GRE;
13878                        tunnel_item = items;
13879                        gre_item = items;
13880                        break;
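                             /*
                              * GRE/NVGRE/GRE option, GENEVE and VXLAN-GPE
                              * matching is deferred: the item pointer is saved
                              * in tunnel_item and translated after the loop,
                              * when item_flags describes the complete pattern.
                              */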
13881                case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13882                        flow_dv_translate_item_gre_key(match_mask,
13883                                                       match_value, items);
13884                        last_item = MLX5_FLOW_LAYER_GRE_KEY;
13885                        break;
13886                case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
13887                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13888                        last_item = MLX5_FLOW_LAYER_GRE;
13889                        tunnel_item = items;
13890                        break;
13891                case RTE_FLOW_ITEM_TYPE_NVGRE:
13892                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13893                        last_item = MLX5_FLOW_LAYER_GRE;
13894                        tunnel_item = items;
13895                        break;
13896                case RTE_FLOW_ITEM_TYPE_VXLAN:
13897                        flow_dv_translate_item_vxlan(dev, attr,
13898                                                     match_mask, match_value,
13899                                                     items, tunnel);
13900                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13901                        last_item = MLX5_FLOW_LAYER_VXLAN;
13902                        break;
13903                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13904                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13905                        last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13906                        tunnel_item = items;
13907                        break;
13908                case RTE_FLOW_ITEM_TYPE_GENEVE:
13909                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13910                        last_item = MLX5_FLOW_LAYER_GENEVE;
13911                        tunnel_item = items;
13912                        break;
13913                case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13914                        ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13915                                                          match_value,
13916                                                          items, error);
13917                        if (ret)
13918                                return rte_flow_error_set(error, -ret,
13919                                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13920                                        "cannot create GENEVE TLV option");
13921                        flow->geneve_tlv_option = 1;
13922                        last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13923                        break;
13924                case RTE_FLOW_ITEM_TYPE_MPLS:
13925                        flow_dv_translate_item_mpls(match_mask, match_value,
13926                                                    items, last_item, tunnel);
13927                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13928                        last_item = MLX5_FLOW_LAYER_MPLS;
13929                        break;
13930                case RTE_FLOW_ITEM_TYPE_MARK:
13931                        flow_dv_translate_item_mark(dev, match_mask,
13932                                                    match_value, items);
13933                        last_item = MLX5_FLOW_ITEM_MARK;
13934                        break;
13935                case RTE_FLOW_ITEM_TYPE_META:
13936                        flow_dv_translate_item_meta(dev, match_mask,
13937                                                    match_value, attr, items);
13938                        last_item = MLX5_FLOW_ITEM_METADATA;
13939                        break;
13940                case RTE_FLOW_ITEM_TYPE_ICMP:
13941                        flow_dv_translate_item_icmp(match_mask, match_value,
13942                                                    items, tunnel);
13943                        matcher.priority = MLX5_PRIORITY_MAP_L4;
13944                        last_item = MLX5_FLOW_LAYER_ICMP;
13945                        break;
13946                case RTE_FLOW_ITEM_TYPE_ICMP6:
13947                        flow_dv_translate_item_icmp6(match_mask, match_value,
13948                                                      items, tunnel);
13949                        matcher.priority = MLX5_PRIORITY_MAP_L4;
13950                        last_item = MLX5_FLOW_LAYER_ICMP6;
13951                        break;
13952                case RTE_FLOW_ITEM_TYPE_TAG:
13953                        flow_dv_translate_item_tag(dev, match_mask,
13954                                                   match_value, items);
13955                        last_item = MLX5_FLOW_ITEM_TAG;
13956                        break;
13957                case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13958                        flow_dv_translate_mlx5_item_tag(dev, match_mask,
13959                                                        match_value, items);
13960                        last_item = MLX5_FLOW_ITEM_TAG;
13961                        break;
13962                case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13963                        flow_dv_translate_item_tx_queue(dev, match_mask,
13964                                                        match_value,
13965                                                        items);
13966                        last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13967                        break;
13968                case RTE_FLOW_ITEM_TYPE_GTP:
13969                        flow_dv_translate_item_gtp(match_mask, match_value,
13970                                                   items, tunnel);
13971                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13972                        last_item = MLX5_FLOW_LAYER_GTP;
13973                        break;
13974                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13975                        ret = flow_dv_translate_item_gtp_psc(match_mask,
13976                                                          match_value,
13977                                                          items);
13978                        if (ret)
13979                                return rte_flow_error_set(error, -ret,
13980                                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13981                                        "cannot create GTP PSC item");
13982                        last_item = MLX5_FLOW_LAYER_GTP_PSC;
13983                        break;
13984                case RTE_FLOW_ITEM_TYPE_ECPRI:
13985                        if (!mlx5_flex_parser_ecpri_exist(dev)) {
13986                                /* Create it only the first time it is used. */
13987                                ret = mlx5_flex_parser_ecpri_alloc(dev);
13988                                if (ret)
13989                                        return rte_flow_error_set
13990                                                (error, -ret,
13991                                                RTE_FLOW_ERROR_TYPE_ITEM,
13992                                                NULL,
13993                                                "cannot create eCPRI parser");
13994                        }
13995                        flow_dv_translate_item_ecpri(dev, match_mask,
13996                                                     match_value, items,
13997                                                     last_item);
13998                        /* No other protocol should follow the eCPRI layer. */
13999                        last_item = MLX5_FLOW_LAYER_ECPRI;
14000                        break;
14001                case RTE_FLOW_ITEM_TYPE_INTEGRITY:
14002                        flow_dv_translate_item_integrity(items, integrity_items,
14003                                                         &last_item);
14004                        break;
14005                case RTE_FLOW_ITEM_TYPE_CONNTRACK:
14006                        flow_dv_translate_item_aso_ct(dev, match_mask,
14007                                                      match_value, items);
14008                        break;
14009                case RTE_FLOW_ITEM_TYPE_FLEX:
14010                        flow_dv_translate_item_flex(dev, match_mask,
14011                                                    match_value, items,
14012                                                    dev_flow, tunnel != 0);
14013                        last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
14014                                    MLX5_FLOW_ITEM_OUTER_FLEX;
14015                        break;
14016                default:
14017                        break;
14018                }
14019                item_flags |= last_item;
14020        }
14021        /*
14022         * When E-Switch mode is enabled, there are two cases where the
14023         * source port must be set manually:
14024         * the first is a NIC ingress steering rule, and the second is an
14025         * E-Switch rule where no port_id item was found.
14026         * In both cases the source port is set according to the current
14027         * port in use.
14028         */
14029        if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
14030            !(item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) && priv->sh->esw_mode &&
14031            !(attr->egress && !attr->transfer)) {
14032                if (flow_dv_translate_item_port_id(dev, match_mask,
14033                                                   match_value, NULL, attr))
14034                        return -rte_errno;
14035        }
14036        if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
14037                flow_dv_translate_item_integrity_post(match_mask, match_value,
14038                                                      integrity_items,
14039                                                      item_flags);
14040        }
14041        if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
14042                flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
14043                                                 tunnel_item, item_flags);
14044        else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
14045                flow_dv_translate_item_geneve(match_mask, match_value,
14046                                              tunnel_item, item_flags);
14047        else if (item_flags & MLX5_FLOW_LAYER_GRE) {
14048                if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
14049                        flow_dv_translate_item_gre(match_mask, match_value,
14050                                                   tunnel_item, item_flags);
14051                else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
14052                        flow_dv_translate_item_nvgre(match_mask, match_value,
14053                                                     tunnel_item, item_flags);
14054                else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)
14055                        flow_dv_translate_item_gre_option(match_mask, match_value,
14056                                        tunnel_item, gre_item, item_flags);
14057                else
14058                        MLX5_ASSERT(false);
14059        }
14060#ifdef RTE_LIBRTE_MLX5_DEBUG
14061        MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
14062                                              dev_flow->dv.value.buf));
14063#endif
14064        /*
14065         * Layers may already be initialized from the prefix flow if this
14066         * dev_flow is the suffix flow.
14067         */
14068        handle->layers |= item_flags;
14069        if (action_flags & MLX5_FLOW_ACTION_RSS)
14070                flow_dv_hashfields_set(dev_flow->handle->layers,
14071                                       rss_desc,
14072                                       &dev_flow->hash_fields);
14073        /* If the sample action includes an RSS action, the Sample/Mirror
14074         * resource must be registered after the hash fields are updated.
14075         */
14076        if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
14077                ret = flow_dv_translate_action_sample(dev,
14078                                                      sample,
14079                                                      dev_flow, attr,
14080                                                      &num_of_dest,
14081                                                      sample_actions,
14082                                                      &sample_res,
14083                                                      error);
14084                if (ret < 0)
14085                        return ret;
14086                ret = flow_dv_create_action_sample(dev,
14087                                                   dev_flow,
14088                                                   num_of_dest,
14089                                                   &sample_res,
14090                                                   &mdest_res,
14091                                                   sample_actions,
14092                                                   action_flags,
14093                                                   error);
14094                if (ret < 0)
14095                        return rte_flow_error_set
14096                                                (error, rte_errno,
14097                                                RTE_FLOW_ERROR_TYPE_ACTION,
14098                                                NULL,
14099                                                "cannot create sample action");
14100                if (num_of_dest > 1) {
14101                        dev_flow->dv.actions[sample_act_pos] =
14102                        dev_flow->dv.dest_array_res->action;
14103                } else {
14104                        dev_flow->dv.actions[sample_act_pos] =
14105                        dev_flow->dv.sample_res->verbs_action;
14106                }
14107        }
14108        /*
14109         * For multiple destinations (sample action with ratio=1), the encap
14110         * action and port ID action are combined into a group action, so the
14111         * original actions must be removed from the flow and the sample
14112         * action used instead.
14113         */
14114        if (num_of_dest > 1 &&
14115            (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
14116                int i;
14117                void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
14118
14119                for (i = 0; i < actions_n; i++) {
14120                        if ((sample_act->dr_encap_action &&
14121                                sample_act->dr_encap_action ==
14122                                dev_flow->dv.actions[i]) ||
14123                                (sample_act->dr_port_id_action &&
14124                                sample_act->dr_port_id_action ==
14125                                dev_flow->dv.actions[i]) ||
14126                                (sample_act->dr_jump_action &&
14127                                sample_act->dr_jump_action ==
14128                                dev_flow->dv.actions[i]))
14129                                continue;
14130                        temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
14131                }
14132                memcpy((void *)dev_flow->dv.actions,
14133                                (void *)temp_actions,
14134                                tmp_actions_n * sizeof(void *));
14135                actions_n = tmp_actions_n;
14136        }
14137        dev_flow->dv.actions_n = actions_n;
14138        dev_flow->act_flags = action_flags;
14139        if (wks->skip_matcher_reg)
14140                return 0;
14141        /* Register matcher. */
14142        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
14143                                    matcher.mask.size);
14144        matcher.priority = mlx5_get_matcher_priority(dev, attr,
14145                                                     matcher.priority,
14146                                                     dev_flow->external);
14147        /*
14148         * When creating a meter drop flow in the drop table with the
14149         * original 5-tuple match, the matcher priority should be lower
14150         * than that of the mtr_id matcher.
14151         */
14152        if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
14153            dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
14154            matcher.priority <= MLX5_REG_BITS)
14155                matcher.priority += MLX5_REG_BITS;
14156        /* The reserved field does not need to be set to 0 here. */
14157        tbl_key.is_fdb = attr->transfer;
14158        tbl_key.is_egress = attr->egress;
14159        tbl_key.level = dev_flow->dv.group;
14160        tbl_key.id = dev_flow->dv.table_id;
14161        if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
14162                                     tunnel, attr->group, error))
14163                return -rte_errno;
14164        return 0;
14165}
14166
14167/**
14168 * Set the hash RX queue for the given hash fields
14169 * (see enum ibv_rx_hash_fields).
14170 *
14171 * @param[in, out] action
14172 *   Shared RSS action holding hash RX queue objects.
14173 * @param[in] hash_fields
14174 *   Defines combination of packet fields to participate in RX hash.
14175 *   The IBV_RX_HASH_INNER bit is ignored when selecting the slot, so
14176 *   inner and outer variants share the same entry.
14177 * @param[in] hrxq_idx
14178 *   Hash RX queue index to set.
14179 *
14180 * @return
14181 *   0 on success, otherwise negative errno value.
14182 */
14183static int
14184__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
14185                              const uint64_t hash_fields,
14186                              uint32_t hrxq_idx)
14187{
14188        uint32_t *hrxqs = action->hrxq;
14189
14190        switch (hash_fields & ~IBV_RX_HASH_INNER) {
14191        case MLX5_RSS_HASH_IPV4:
14192                /* fall-through. */
14193        case MLX5_RSS_HASH_IPV4_DST_ONLY:
14194                /* fall-through. */
14195        case MLX5_RSS_HASH_IPV4_SRC_ONLY:
14196                hrxqs[0] = hrxq_idx;
14197                return 0;
14198        case MLX5_RSS_HASH_IPV4_TCP:
14199                /* fall-through. */
14200        case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
14201                /* fall-through. */
14202        case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
14203                hrxqs[1] = hrxq_idx;
14204                return 0;
14205        case MLX5_RSS_HASH_IPV4_UDP:
14206                /* fall-through. */
14207        case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
14208                /* fall-through. */
14209        case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
14210                hrxqs[2] = hrxq_idx;
14211                return 0;
14212        case MLX5_RSS_HASH_IPV6:
14213                /* fall-through. */
14214        case MLX5_RSS_HASH_IPV6_DST_ONLY:
14215                /* fall-through. */
14216        case MLX5_RSS_HASH_IPV6_SRC_ONLY:
14217                hrxqs[3] = hrxq_idx;
14218                return 0;
14219        case MLX5_RSS_HASH_IPV6_TCP:
14220                /* fall-through. */
14221        case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
14222                /* fall-through. */
14223        case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
14224                hrxqs[4] = hrxq_idx;
14225                return 0;
14226        case MLX5_RSS_HASH_IPV6_UDP:
14227                /* fall-through. */
14228        case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
14229                /* fall-through. */
14230        case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
14231                hrxqs[5] = hrxq_idx;
14232                return 0;
14233        case MLX5_RSS_HASH_NONE:
14234                hrxqs[6] = hrxq_idx;
14235                return 0;
14236        case MLX5_RSS_HASH_IPV4_ESP:
14237                hrxqs[7] = hrxq_idx;
14238                return 0;
14239        case MLX5_RSS_HASH_IPV6_ESP:
14240                hrxqs[8] = hrxq_idx;
14241                return 0;
14242        case MLX5_RSS_HASH_ESP_SPI:
14243                hrxqs[9] = hrxq_idx;
14244                return 0;
14245        default:
14246                return -1;
14247        }
14248}
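
/*
 * Slot layout implied by the switch above (SRC_ONLY/DST_ONLY variants
 * share the slot of their base type):
 *
 *   hrxq[0] IPv4     hrxq[1] IPv4/TCP  hrxq[2] IPv4/UDP
 *   hrxq[3] IPv6     hrxq[4] IPv6/TCP  hrxq[5] IPv6/UDP
 *   hrxq[6] none     hrxq[7] IPv4/ESP  hrxq[8] IPv6/ESP
 *   hrxq[9] ESP SPI
 */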
14249
14250/**
14251 * Look up the hash RX queue by hash fields
14252 * (see enum ibv_rx_hash_fields).
14253 *
14254 * @param[in] dev
14255 *   Pointer to the Ethernet device structure.
14256 * @param[in] idx
14257 *   Shared RSS action ID holding hash RX queue objects.
14258 * @param[in] hash_fields
14259 *   Defines combination of packet fields to participate in RX hash.
14260 *   The IBV_RX_HASH_INNER bit is ignored when selecting the slot, so
14261 *   inner and outer variants return the same entry.
14262 *
14263 * @return
14264 *   Valid hash RX queue index, otherwise 0.
14265 */
14266uint32_t
14267flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
14268                               const uint64_t hash_fields)
14269{
14270        struct mlx5_priv *priv = dev->data->dev_private;
14271        struct mlx5_shared_action_rss *shared_rss =
14272            mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14273        const uint32_t *hrxqs = shared_rss->hrxq;
14274
14275        switch (hash_fields & ~IBV_RX_HASH_INNER) {
14276        case MLX5_RSS_HASH_IPV4:
14277                /* fall-through. */
14278        case MLX5_RSS_HASH_IPV4_DST_ONLY:
14279                /* fall-through. */
14280        case MLX5_RSS_HASH_IPV4_SRC_ONLY:
14281                return hrxqs[0];
14282        case MLX5_RSS_HASH_IPV4_TCP:
14283                /* fall-through. */
14284        case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
14285                /* fall-through. */
14286        case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
14287                return hrxqs[1];
14288        case MLX5_RSS_HASH_IPV4_UDP:
14289                /* fall-through. */
14290        case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
14291                /* fall-through. */
14292        case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
14293                return hrxqs[2];
14294        case MLX5_RSS_HASH_IPV6:
14295                /* fall-through. */
14296        case MLX5_RSS_HASH_IPV6_DST_ONLY:
14297                /* fall-through. */
14298        case MLX5_RSS_HASH_IPV6_SRC_ONLY:
14299                return hrxqs[3];
14300        case MLX5_RSS_HASH_IPV6_TCP:
14301                /* fall-through. */
14302        case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
14303                /* fall-through. */
14304        case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
14305                return hrxqs[4];
14306        case MLX5_RSS_HASH_IPV6_UDP:
14307                /* fall-through. */
14308        case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
14309                /* fall-through. */
14310        case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
14311                return hrxqs[5];
14312        case MLX5_RSS_HASH_NONE:
14313                return hrxqs[6];
14314        case MLX5_RSS_HASH_IPV4_ESP:
14315                return hrxqs[7];
14316        case MLX5_RSS_HASH_IPV6_ESP:
14317                return hrxqs[8];
14318        case MLX5_RSS_HASH_ESP_SPI:
14319                return hrxqs[9];
14320        default:
14321                return 0;
14322        }
14323
14324}
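
/*
 * A minimal lookup sketch (mirroring the shared-RSS fate handling in
 * flow_dv_apply() below); assumes "idx" is a valid shared RSS action
 * index and "priv" is the port's mlx5_priv:
 *
 *	uint32_t hrxq_idx = flow_dv_action_rss_hrxq_lookup(dev, idx,
 *							   hash_fields);
 *	struct mlx5_hrxq *hrxq = hrxq_idx ?
 *		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx) :
 *		NULL;
 */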
14325
14326/**
14327 * Apply the flow to the NIC, lock free
14328 * (mutex should be acquired by the caller).
14329 *
14330 * @param[in] dev
14331 *   Pointer to the Ethernet device structure.
14332 * @param[in, out] flow
14333 *   Pointer to flow structure.
14334 * @param[out] error
14335 *   Pointer to error structure.
14336 *
14337 * @return
14338 *   0 on success, a negative errno value otherwise and rte_errno is set.
14339 */
14340static int
14341flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
14342              struct rte_flow_error *error)
14343{
14344        struct mlx5_flow_dv_workspace *dv;
14345        struct mlx5_flow_handle *dh;
14346        struct mlx5_flow_handle_dv *dv_h;
14347        struct mlx5_flow *dev_flow;
14348        struct mlx5_priv *priv = dev->data->dev_private;
14349        uint32_t handle_idx;
14350        int n;
14351        int err;
14352        int idx;
14353        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
14354        struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
14355        uint8_t misc_mask;
14356
14357        MLX5_ASSERT(wks);
14358        for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
14359                dev_flow = &wks->flows[idx];
14360                dv = &dev_flow->dv;
14361                dh = dev_flow->handle;
14362                dv_h = &dh->dvh;
14363                n = dv->actions_n;
14364                if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
14365                        if (dv->transfer) {
14366                                MLX5_ASSERT(priv->sh->dr_drop_action);
14367                                dv->actions[n++] = priv->sh->dr_drop_action;
14368                        } else {
14369#ifdef HAVE_MLX5DV_DR
14370                                /* DR supports drop action placeholder. */
14371                                MLX5_ASSERT(priv->sh->dr_drop_action);
14372                                dv->actions[n++] = dv->group ?
14373                                        priv->sh->dr_drop_action :
14374                                        priv->root_drop_action;
14375#else
14376                                /* For DV we use the explicit drop queue. */
14377                                MLX5_ASSERT(priv->drop_queue.hrxq);
14378                                dv->actions[n++] =
14379                                                priv->drop_queue.hrxq->action;
14380#endif
14381                        }
14382                } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
14383                           !dv_h->rix_sample && !dv_h->rix_dest_array)) {
14384                        struct mlx5_hrxq *hrxq;
14385                        uint32_t hrxq_idx;
14386
14387                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
14388                                                    &hrxq_idx);
14389                        if (!hrxq) {
14390                                rte_flow_error_set
14391                                        (error, rte_errno,
14392                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14393                                         "cannot get hash queue");
14394                                goto error;
14395                        }
14396                        dh->rix_hrxq = hrxq_idx;
14397                        dv->actions[n++] = hrxq->action;
14398                } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
14399                        struct mlx5_hrxq *hrxq = NULL;
14400                        uint32_t hrxq_idx;
14401
14402                        hrxq_idx = flow_dv_action_rss_hrxq_lookup(dev,
14403                                                rss_desc->shared_rss,
14404                                                dev_flow->hash_fields);
14405                        if (hrxq_idx)
14406                                hrxq = mlx5_ipool_get
14407                                        (priv->sh->ipool[MLX5_IPOOL_HRXQ],
14408                                         hrxq_idx);
14409                        if (!hrxq) {
14410                                rte_flow_error_set
14411                                        (error, rte_errno,
14412                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14413                                         "cannot get hash queue");
14414                                goto error;
14415                        }
14416                        dh->rix_srss = rss_desc->shared_rss;
14417                        dv->actions[n++] = hrxq->action;
14418                } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
14419                        if (!priv->sh->default_miss_action) {
14420                                rte_flow_error_set
14421                                        (error, rte_errno,
14422                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14423                                         "default miss action was not created");
14424                                goto error;
14425                        }
14426                        dv->actions[n++] = priv->sh->default_miss_action;
14427                }
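                /*
                 * Trim the match value buffer to the parameter blocks
                 * actually enabled in the matcher before creating the
                 * flow in hardware.
                 */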
14428                misc_mask = flow_dv_matcher_enable(dv->value.buf);
14429                __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
14430                err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
14431                                               (void *)&dv->value, n,
14432                                               dv->actions, &dh->drv_flow);
14433                if (err) {
14434                        rte_flow_error_set
14435                                (error, errno,
14436                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14437                                NULL,
14438                                (!priv->sh->config.allow_duplicate_pattern &&
14439                                errno == EEXIST) ?
14440                                "duplicate pattern is not allowed" :
14441                                "hardware refuses to create flow");
14442                        goto error;
14443                }
14444                if (priv->vmwa_context &&
14445                    dh->vf_vlan.tag && !dh->vf_vlan.created) {
14446                        /*
14447                         * The rule contains the VLAN pattern.
14448                         * For a VF we create a VLAN interface so
14449                         * that the hypervisor sets the correct
14450                         * e-Switch vport context.
14451                         */
14452                        mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
14453                }
14454        }
14455        return 0;
14456error:
14457        err = rte_errno; /* Save rte_errno before cleanup. */
14458        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
14459                       handle_idx, dh, next) {
14460                /* hrxq is a union member; clear it only for the matching fate. */
14461                if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
14462                        mlx5_hrxq_release(dev, dh->rix_hrxq);
14463                        dh->rix_hrxq = 0;
14464                } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
14465                        dh->rix_srss = 0;
14466                }
14467                if (dh->vf_vlan.tag && dh->vf_vlan.created)
14468                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14469        }
14470        rte_errno = err; /* Restore rte_errno. */
14471        return -rte_errno;
14472}
14473
14474void
14475flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
14476                          struct mlx5_list_entry *entry)
14477{
14478        struct mlx5_flow_dv_matcher *resource = container_of(entry,
14479                                                             typeof(*resource),
14480                                                             entry);
14481
14482        claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
14483        mlx5_free(resource);
14484}
14485
14486/**
14487 * Release the flow matcher.
14488 *
14489 * @param dev
14490 *   Pointer to Ethernet device.
14491 * @param handle
14492 *   Pointer to mlx5_flow_handle holding the matcher.
14493 *
14494 * @return
14495 *   1 while a reference on it exists, 0 when freed.
14496 */
14497static int
14498flow_dv_matcher_release(struct rte_eth_dev *dev,
14499                        struct mlx5_flow_handle *handle)
14500{
14501        struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14502        struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14503                                                            typeof(*tbl), tbl);
14504        int ret;
14505
14506        MLX5_ASSERT(matcher->matcher_object);
14507        ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14508        flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14509        return ret;
14510}
14511
14512void
14513flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14514{
14515        struct mlx5_dev_ctx_shared *sh = tool_ctx;
14516        struct mlx5_flow_dv_encap_decap_resource *res =
14517                                       container_of(entry, typeof(*res), entry);
14518
14519        claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14520        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14521}
14522
14523/**
14524 * Release an encap/decap resource.
14525 *
14526 * @param dev
14527 *   Pointer to Ethernet device.
14528 * @param encap_decap_idx
14529 *   Index of encap decap resource.
14530 *
14531 * @return
14532 *   1 while a reference on it exists, 0 when freed.
14533 */
14534static int
14535flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14536                                     uint32_t encap_decap_idx)
14537{
14538        struct mlx5_priv *priv = dev->data->dev_private;
14539        struct mlx5_flow_dv_encap_decap_resource *resource;
14540
14541        resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14542                                  encap_decap_idx);
14543        if (!resource)
14544                return 0;
14545        MLX5_ASSERT(resource->action);
14546        return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14547}
14548
14549/**
14550 * Release a jump-to-table action resource.
14551 *
14552 * @param dev
14553 *   Pointer to Ethernet device.
14554 * @param rix_jump
14555 *   Index to the jump action resource.
14556 *
14557 * @return
14558 *   1 while a reference on it exists, 0 when freed.
14559 */
14560static int
14561flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14562                                  uint32_t rix_jump)
14563{
14564        struct mlx5_priv *priv = dev->data->dev_private;
14565        struct mlx5_flow_tbl_data_entry *tbl_data;
14566
14567        tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14568                                  rix_jump);
14569        if (!tbl_data)
14570                return 0;
14571        return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14572}
14573
14574void
14575flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14576{
14577        struct mlx5_flow_dv_modify_hdr_resource *res =
14578                container_of(entry, typeof(*res), entry);
14579        struct mlx5_dev_ctx_shared *sh = tool_ctx;
14580
14581        claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14582        mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14583}
14584
14585/**
14586 * Release a modify-header resource.
14587 *
14588 * @param dev
14589 *   Pointer to Ethernet device.
14590 * @param handle
14591 *   Pointer to mlx5_flow_handle.
14592 *
14593 * @return
14594 *   1 while a reference on it exists, 0 when freed.
14595 */
14596static int
14597flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14598                                    struct mlx5_flow_handle *handle)
14599{
14600        struct mlx5_priv *priv = dev->data->dev_private;
14601        struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14602
14603        MLX5_ASSERT(entry->action);
14604        return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14605}
14606
14607void
14608flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14609{
14610        struct mlx5_dev_ctx_shared *sh = tool_ctx;
14611        struct mlx5_flow_dv_port_id_action_resource *resource =
14612                                  container_of(entry, typeof(*resource), entry);
14613
14614        claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14615        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14616}
14617
14618/**
14619 * Release port ID action resource.
14620 *
14621 * @param dev
14622 *   Pointer to Ethernet device.
14623 * @param handle
14624 *   Pointer to mlx5_flow_handle.
14625 *
14626 * @return
14627 *   1 while a reference on it exists, 0 when freed.
14628 */
14629static int
14630flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14631                                        uint32_t port_id)
14632{
14633        struct mlx5_priv *priv = dev->data->dev_private;
14634        struct mlx5_flow_dv_port_id_action_resource *resource;
14635
14636        resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14637        if (!resource)
14638                return 0;
14639        MLX5_ASSERT(resource->action);
14640        return mlx5_list_unregister(priv->sh->port_id_action_list,
14641                                    &resource->entry);
14642}
14643
14644/**
14645 * Release shared RSS action resource.
14646 *
14647 * @param dev
14648 *   Pointer to Ethernet device.
14649 * @param srss
14650 *   Shared RSS action index.
14651 */
14652static void
14653flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14654{
14655        struct mlx5_priv *priv = dev->data->dev_private;
14656        struct mlx5_shared_action_rss *shared_rss;
14657
14658        shared_rss = mlx5_ipool_get
14659                        (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14660        __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14661}
14662
14663void
14664flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14665{
14666        struct mlx5_dev_ctx_shared *sh = tool_ctx;
14667        struct mlx5_flow_dv_push_vlan_action_resource *resource =
14668                        container_of(entry, typeof(*resource), entry);
14669
14670        claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14671        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14672}
14673
14674/**
14675 * Release push vlan action resource.
14676 *
14677 * @param dev
14678 *   Pointer to Ethernet device.
14679 * @param handle
14680 *   Pointer to mlx5_flow_handle.
14681 *
14682 * @return
14683 *   1 while a reference on it exists, 0 when freed.
14684 */
14685static int
14686flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14687                                          struct mlx5_flow_handle *handle)
14688{
14689        struct mlx5_priv *priv = dev->data->dev_private;
14690        struct mlx5_flow_dv_push_vlan_action_resource *resource;
14691        uint32_t idx = handle->dvh.rix_push_vlan;
14692
14693        resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14694        if (!resource)
14695                return 0;
14696        MLX5_ASSERT(resource->action);
14697        return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14698                                    &resource->entry);
14699}
14700
14701/**
14702 * Release the fate resource.
14703 *
14704 * @param dev
14705 *   Pointer to Ethernet device.
14706 * @param handle
14707 *   Pointer to mlx5_flow_handle.
14708 */
14709static void
14710flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14711                               struct mlx5_flow_handle *handle)
14712{
14713        if (!handle->rix_fate)
14714                return;
14715        switch (handle->fate_action) {
14716        case MLX5_FLOW_FATE_QUEUE:
14717                if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14718                        mlx5_hrxq_release(dev, handle->rix_hrxq);
14719                break;
14720        case MLX5_FLOW_FATE_JUMP:
14721                flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14722                break;
14723        case MLX5_FLOW_FATE_PORT_ID:
14724                flow_dv_port_id_action_resource_release(dev,
14725                                handle->rix_port_id_action);
14726                break;
14727        default:
14728                DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14729                break;
14730        }
14731        handle->rix_fate = 0;
14732}
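
/*
 * Note: rix_hrxq, rix_jump, rix_port_id_action and rix_fate overlay one
 * union in struct mlx5_flow_handle, which is why the generic rix_fate
 * test above covers every fate type and why each case must only touch
 * the member matching fate_action.
 */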
14733
14734void
14735flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14736                         struct mlx5_list_entry *entry)
14737{
14738        struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14739                                                              typeof(*resource),
14740                                                              entry);
14741        struct rte_eth_dev *dev = resource->dev;
14742        struct mlx5_priv *priv = dev->data->dev_private;
14743
14744        if (resource->verbs_action)
14745                claim_zero(mlx5_flow_os_destroy_flow_action
14746                                                      (resource->verbs_action));
14747        if (resource->normal_path_tbl)
14748                flow_dv_tbl_resource_release(MLX5_SH(dev),
14749                                             resource->normal_path_tbl);
14750        flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14751        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14752        DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14753}
14754
14755/**
14756 * Release a sample resource.
14757 *
14758 * @param dev
14759 *   Pointer to Ethernet device.
14760 * @param handle
14761 *   Pointer to mlx5_flow_handle.
14762 *
14763 * @return
14764 *   1 while a reference on it exists, 0 when freed.
14765 */
14766static int
14767flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14768                                     struct mlx5_flow_handle *handle)
14769{
14770        struct mlx5_priv *priv = dev->data->dev_private;
14771        struct mlx5_flow_dv_sample_resource *resource;
14772
14773        resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14774                                  handle->dvh.rix_sample);
14775        if (!resource)
14776                return 0;
14777        MLX5_ASSERT(resource->verbs_action);
14778        return mlx5_list_unregister(priv->sh->sample_action_list,
14779                                    &resource->entry);
14780}
14781
14782void
14783flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14784                             struct mlx5_list_entry *entry)
14785{
14786        struct mlx5_flow_dv_dest_array_resource *resource =
14787                        container_of(entry, typeof(*resource), entry);
14788        struct rte_eth_dev *dev = resource->dev;
14789        struct mlx5_priv *priv = dev->data->dev_private;
14790        uint32_t i = 0;
14791
14792        MLX5_ASSERT(resource->action);
14793        if (resource->action)
14794                claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14795        for (; i < resource->num_of_dest; i++)
14796                flow_dv_sample_sub_actions_release(dev,
14797                                                   &resource->sample_idx[i]);
14798        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14799        DRV_LOG(DEBUG, "destination array resource %p: removed",
14800                (void *)resource);
14801}
14802
14803/**
14804 * Release a destination array resource.
14805 *
14806 * @param dev
14807 *   Pointer to Ethernet device.
14808 * @param handle
14809 *   Pointer to mlx5_flow_handle.
14810 *
14811 * @return
14812 *   1 while a reference on it exists, 0 when freed.
14813 */
14814static int
14815flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14816                                    struct mlx5_flow_handle *handle)
14817{
14818        struct mlx5_priv *priv = dev->data->dev_private;
14819        struct mlx5_flow_dv_dest_array_resource *resource;
14820
14821        resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14822                                  handle->dvh.rix_dest_array);
14823        if (!resource)
14824                return 0;
14825        MLX5_ASSERT(resource->action);
14826        return mlx5_list_unregister(priv->sh->dest_array_list,
14827                                    &resource->entry);
14828}
14829
14830static void
14831flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14832{
14833        struct mlx5_priv *priv = dev->data->dev_private;
14834        struct mlx5_dev_ctx_shared *sh = priv->sh;
14835        struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14836                                sh->geneve_tlv_option_resource;
14837        rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14838        if (geneve_opt_resource) {
14839                if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14840                                         __ATOMIC_RELAXED))) {
14841                        claim_zero(mlx5_devx_cmd_destroy
14842                                        (geneve_opt_resource->obj));
14843                        mlx5_free(sh->geneve_tlv_option_resource);
14844                        sh->geneve_tlv_option_resource = NULL;
14845                }
14846        }
14847        rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14848}
14849
14850/**
14851 * Remove the flow from the NIC but keep it in memory.
14852 * Lock free (mutex should be acquired by the caller).
14853 *
14854 * @param[in] dev
14855 *   Pointer to Ethernet device.
14856 * @param[in, out] flow
14857 *   Pointer to flow structure.
14858 */
14859static void
14860flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14861{
14862        struct mlx5_flow_handle *dh;
14863        uint32_t handle_idx;
14864        struct mlx5_priv *priv = dev->data->dev_private;
14865
14866        if (!flow)
14867                return;
14868        handle_idx = flow->dev_handles;
14869        while (handle_idx) {
14870                dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14871                                    handle_idx);
14872                if (!dh)
14873                        return;
14874                if (dh->drv_flow) {
14875                        claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14876                        dh->drv_flow = NULL;
14877                }
14878                if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14879                        flow_dv_fate_resource_release(dev, dh);
14880                if (dh->vf_vlan.tag && dh->vf_vlan.created)
14881                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14882                handle_idx = dh->next.next;
14883        }
14884}
14885
14886/**
14887 * Remove the flow from the NIC and from memory.
14888 * Lock free (mutex should be acquired by the caller).
14889 *
14890 * @param[in] dev
14891 *   Pointer to the Ethernet device structure.
14892 * @param[in, out] flow
14893 *   Pointer to flow structure.
14894 */
14895static void
14896flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14897{
14898        struct mlx5_flow_handle *dev_handle;
14899        struct mlx5_priv *priv = dev->data->dev_private;
14900        struct mlx5_flow_meter_info *fm = NULL;
14901        uint32_t srss = 0;
14902
14903        if (!flow)
14904                return;
14905        flow_dv_remove(dev, flow);
14906        if (flow->counter) {
14907                flow_dv_counter_free(dev, flow->counter);
14908                flow->counter = 0;
14909        }
14910        if (flow->meter) {
14911                fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14912                if (fm)
14913                        mlx5_flow_meter_detach(priv, fm);
14914                flow->meter = 0;
14915        }
14916        /* Keep the current age handling by default. */
14917        if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14918                flow_dv_aso_ct_release(dev, flow->ct, NULL);
14919        else if (flow->age)
14920                flow_dv_aso_age_release(dev, flow->age);
14921        if (flow->geneve_tlv_option) {
14922                flow_dv_geneve_tlv_option_resource_release(dev);
14923                flow->geneve_tlv_option = 0;
14924        }
14925        while (flow->dev_handles) {
14926                uint32_t tmp_idx = flow->dev_handles;
14927
14928                dev_handle = mlx5_ipool_get(priv->sh->ipool
14929                                            [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14930                if (!dev_handle)
14931                        return;
14932                flow->dev_handles = dev_handle->next.next;
14933                while (dev_handle->flex_item) {
14934                        int index = rte_bsf32(dev_handle->flex_item);
14935
14936                        mlx5_flex_release_index(dev, index);
14937                        dev_handle->flex_item &= ~(uint8_t)RTE_BIT32(index);
14938                }
14939                if (dev_handle->dvh.matcher)
14940                        flow_dv_matcher_release(dev, dev_handle);
14941                if (dev_handle->dvh.rix_sample)
14942                        flow_dv_sample_resource_release(dev, dev_handle);
14943                if (dev_handle->dvh.rix_dest_array)
14944                        flow_dv_dest_array_resource_release(dev, dev_handle);
14945                if (dev_handle->dvh.rix_encap_decap)
14946                        flow_dv_encap_decap_resource_release(dev,
14947                                dev_handle->dvh.rix_encap_decap);
14948                if (dev_handle->dvh.modify_hdr)
14949                        flow_dv_modify_hdr_resource_release(dev, dev_handle);
14950                if (dev_handle->dvh.rix_push_vlan)
14951                        flow_dv_push_vlan_action_resource_release(dev,
14952                                                                  dev_handle);
14953                if (dev_handle->dvh.rix_tag)
14954                        flow_dv_tag_release(dev,
14955                                            dev_handle->dvh.rix_tag);
14956                if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14957                        flow_dv_fate_resource_release(dev, dev_handle);
14958                else if (!srss)
14959                        srss = dev_handle->rix_srss;
14960                if (fm && dev_handle->is_meter_flow_id &&
14961                    dev_handle->split_flow_id)
14962                        mlx5_ipool_free(fm->flow_ipool,
14963                                        dev_handle->split_flow_id);
14964                else if (dev_handle->split_flow_id &&
14965                    !dev_handle->is_meter_flow_id)
14966                        mlx5_ipool_free(priv->sh->ipool
14967                                        [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14968                                        dev_handle->split_flow_id);
14969                mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14970                           tmp_idx);
14971        }
14972        if (srss)
14973                flow_dv_shared_rss_action_release(dev, srss);
14974}
14975
14976/**
14977 * Release array of hash RX queue objects.
14978 * Helper function.
14979 *
14980 * @param[in] dev
14981 *   Pointer to the Ethernet device structure.
14982 * @param[in, out] hrxqs
14983 *   Array of hash RX queue objects.
14984 *
14985 * @return
14986 *   Total number of references to hash RX queue objects in *hrxqs* array
14987 *   after this operation.
14988 */
14989static int
14990__flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14991                        uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14992{
14993        size_t i;
14994        int remaining = 0;
14995
14996        for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14997                int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14998
14999                if (!ret)
15000                        (*hrxqs)[i] = 0;
15001                remaining += ret;
15002        }
15003        return remaining;
15004}
15005
15006/**
15007 * Release all hash RX queue objects representing shared RSS action.
15008 *
15009 * @param[in] dev
15010 *   Pointer to the Ethernet device structure.
15011 * @param[in, out] action
15012 *   Shared RSS action to remove hash RX queue objects from.
15013 *
15014 * @return
15015 *   Total number of references to hash RX queue objects stored in *action*
15016 *   after this operation.
15017 *   Expected to be 0 if no external references held.
15018 */
15019static int
15020__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
15021                                 struct mlx5_shared_action_rss *shared_rss)
15022{
15023        return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
15024}
15025
15026/**
15027 * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
15028 * user input.
15029 *
15030 * Only one hash value is available for one L3+L4 combination.
15031 * For example:
15032 * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
15033 * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
15034 * the same slot in mlx5_rss_hash_fields.
15035 *
15036 * @param[in] orig_rss_types
15037 *   RSS type as provided in shared RSS action.
15038 * @param[in, out] hash_field
15039 *   hash_field variable needed to be adjusted.
15040 *
15041 * @return
15042 *   void
15043 */
15044void
15045flow_dv_action_rss_l34_hash_adjust(uint64_t orig_rss_types,
15046                                   uint64_t *hash_field)
15047{
15048        uint64_t rss_types = rte_eth_rss_hf_refine(orig_rss_types);
15049
15050        switch (*hash_field & ~IBV_RX_HASH_INNER) {
15051        case MLX5_RSS_HASH_IPV4:
15052                if (rss_types & MLX5_IPV4_LAYER_TYPES) {
15053                        *hash_field &= ~MLX5_RSS_HASH_IPV4;
15054                        if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
15055                                *hash_field |= IBV_RX_HASH_DST_IPV4;
15056                        else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
15057                                *hash_field |= IBV_RX_HASH_SRC_IPV4;
15058                        else
15059                                *hash_field |= MLX5_RSS_HASH_IPV4;
15060                }
15061                return;
15062        case MLX5_RSS_HASH_IPV6:
15063                if (rss_types & MLX5_IPV6_LAYER_TYPES) {
15064                        *hash_field &= ~MLX5_RSS_HASH_IPV6;
15065                        if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
15066                                *hash_field |= IBV_RX_HASH_DST_IPV6;
15067                        else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
15068                                *hash_field |= IBV_RX_HASH_SRC_IPV6;
15069                        else
15070                                *hash_field |= MLX5_RSS_HASH_IPV6;
15071                }
15072                return;
15073        case MLX5_RSS_HASH_IPV4_UDP:
15074                /* fall-through. */
15075        case MLX5_RSS_HASH_IPV6_UDP:
15076                if (rss_types & RTE_ETH_RSS_UDP) {
15077                        *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
15078                        if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
15079                                *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
15080                        else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
15081                                *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
15082                        else
15083                                *hash_field |= MLX5_UDP_IBV_RX_HASH;
15084                }
15085                return;
15086        case MLX5_RSS_HASH_IPV4_TCP:
15087                /* fall-through. */
15088        case MLX5_RSS_HASH_IPV6_TCP:
15089                if (rss_types & RTE_ETH_RSS_TCP) {
15090                        *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
15091                        if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
15092                                *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
15093                        else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
15094                                *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
15095                        else
15096                                *hash_field |= MLX5_TCP_IBV_RX_HASH;
15097                }
15098                return;
15099        default:
15100                return;
15101        }
15102}
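
/*
 * Worked example: with orig_rss_types = RTE_ETH_RSS_IPV4 |
 * RTE_ETH_RSS_L3_DST_ONLY and *hash_field == MLX5_RSS_HASH_IPV4, the
 * function clears the MLX5_RSS_HASH_IPV4 bits and sets
 * IBV_RX_HASH_DST_IPV4, so only the destination IPv4 address feeds
 * the RX hash.
 */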
15103
15104/**
15105 * Setup shared RSS action.
15106 * Prepare a set of hash RX queue objects sufficient to handle all valid
15107 * hash_fields combinations (see enum ibv_rx_hash_fields).
15108 *
15109 * @param[in] dev
15110 *   Pointer to the Ethernet device structure.
15111 * @param[in] action_idx
15112 *   Shared RSS action ipool index.
15113 * @param[in, out] action
15114 *   Partially initialized shared RSS action.
15115 * @param[out] error
15116 *   Perform verbose error reporting if not NULL. Initialized in case of
15117 *   error only.
15118 *
15119 * @return
15120 *   0 on success, otherwise negative errno value.
15121 */
15122static int
15123__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
15124                           uint32_t action_idx,
15125                           struct mlx5_shared_action_rss *shared_rss,
15126                           struct rte_flow_error *error)
15127{
15128        struct mlx5_priv *priv = dev->data->dev_private;
15129        struct mlx5_flow_rss_desc rss_desc = { 0 };
15130        size_t i;
15131        int err;
15132
15133        shared_rss->ind_tbl = mlx5_ind_table_obj_new
15134                              (dev, shared_rss->origin.queue,
15135                               shared_rss->origin.queue_num,
15136                               true,
15137                               !!dev->data->dev_started);
15138        if (!shared_rss->ind_tbl)
15139                return rte_flow_error_set(error, rte_errno,
15140                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
15141                                          "cannot setup indirection table");
15142        memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
15143        rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
15144        rss_desc.const_q = shared_rss->origin.queue;
15145        rss_desc.queue_num = shared_rss->origin.queue_num;
15146        /* Set non-zero value to indicate a shared RSS. */
15147        rss_desc.shared_rss = action_idx;
15148        rss_desc.ind_tbl = shared_rss->ind_tbl;
15149        if (priv->sh->config.dv_flow_en == 2)
15150                rss_desc.hws_flags = MLX5DR_ACTION_FLAG_HWS_RX;
15151        for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
15152                struct mlx5_hrxq *hrxq;
15153                uint64_t hash_fields = mlx5_rss_hash_fields[i];
15154                int tunnel = 0;
15155
15156                flow_dv_action_rss_l34_hash_adjust(shared_rss->origin.types,
15157                                                   &hash_fields);
15158                if (shared_rss->origin.level > 1) {
15159                        hash_fields |= IBV_RX_HASH_INNER;
15160                        tunnel = 1;
15161                }
15162                rss_desc.tunnel = tunnel;
15163                rss_desc.hash_fields = hash_fields;
15164                hrxq = mlx5_hrxq_get(dev, &rss_desc);
15165                if (!hrxq) {
15166                        rte_flow_error_set
15167                                (error, rte_errno,
15168                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
15169                                 "cannot get hash queue");
15170                        goto error_hrxq_new;
15171                }
15172                err = __flow_dv_action_rss_hrxq_set
15173                        (shared_rss, hash_fields, hrxq->idx);
15174                MLX5_ASSERT(!err);
15175        }
15176        return 0;
15177error_hrxq_new:
15178        err = rte_errno;
15179        __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
15180        if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
15181                shared_rss->ind_tbl = NULL;
15182        rte_errno = err;
15183        return -rte_errno;
15184}
15185
15186/**
15187 * Create shared RSS action.
15188 *
15189 * @param[in] dev
15190 *   Pointer to the Ethernet device structure.
15191 * @param[in] conf
15192 *   Shared action configuration.
15193 * @param[in] rss
15194 *   RSS action specification used to create shared action.
15195 * @param[out] error
15196 *   Perform verbose error reporting if not NULL. Initialized in case of
15197 *   error only.
15198 *
15199 * @return
15200 *   A valid shared action ID in case of success, 0 otherwise and
15201 *   rte_errno is set.
15202 */
15203static uint32_t
15204__flow_dv_action_rss_create(struct rte_eth_dev *dev,
15205                            const struct rte_flow_indir_action_conf *conf,
15206                            const struct rte_flow_action_rss *rss,
15207                            struct rte_flow_error *error)
15208{
15209        struct mlx5_priv *priv = dev->data->dev_private;
15210        struct mlx5_shared_action_rss *shared_rss = NULL;
15211        struct rte_flow_action_rss *origin;
15212        const uint8_t *rss_key;
15213        uint32_t idx;
15214
15215        RTE_SET_USED(conf);
15216        shared_rss = mlx5_ipool_zmalloc
15217                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
15218        if (!shared_rss) {
15219                rte_flow_error_set(error, ENOMEM,
15220                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
15221                                   "cannot allocate resource memory");
15222                goto error_rss_init;
15223        }
15224        if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
15225                rte_flow_error_set(error, E2BIG,
15226                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
15227                                   "rss action number out of range");
15228                goto error_rss_init;
15229        }
15230        origin = &shared_rss->origin;
15231        origin->func = rss->func;
15232        origin->level = rss->level;
15233        /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
15234        origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
15235        /* NULL RSS key indicates default RSS key. */
15236        rss_key = !rss->key ? rss_hash_default_key : rss->key;
15237        memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
15238        origin->key = &shared_rss->key[0];
15239        origin->key_len = MLX5_RSS_HASH_KEY_LEN;
15240        origin->queue = rss->queue;
15241        origin->queue_num = rss->queue_num;
15242        if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
15243                goto error_rss_init;
15244        /* Update queue with the indirection table queue memory. */
15245        origin->queue = shared_rss->ind_tbl->queues;
15246        rte_spinlock_init(&shared_rss->action_rss_sl);
15247        __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
15248        rte_spinlock_lock(&priv->shared_act_sl);
15249        ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
15250                     &priv->rss_shared_actions, idx, shared_rss, next);
15251        rte_spinlock_unlock(&priv->shared_act_sl);
15252        return idx;
15253error_rss_init:
15254        if (shared_rss) {
15255                if (shared_rss->ind_tbl)
15256                        mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
15257                                                   !!dev->data->dev_started);
15258                mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
15259                                idx);
15260        }
15261        return 0;
15262}
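
/*
 * A minimal application-side sketch that reaches this path through the
 * public indirect action API (queue list and port_id are illustrative):
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.types = RTE_ETH_RSS_IP,
 *		.queue = queues,
 *		.queue_num = RTE_DIM(queues),
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss,
 *	};
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_error error;
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(port_id, &conf, &action, &error);
 */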
15263
15264/**
15265 * Destroy the shared RSS action.
15266 * Release related hash RX queue objects.
15267 *
15268 * @param[in] dev
15269 *   Pointer to the Ethernet device structure.
15270 * @param[in] idx
15271 *   The shared RSS action object ID to be removed.
15272 * @param[out] error
15273 *   Perform verbose error reporting if not NULL. Initialized in case of
15274 *   error only.
15275 *
15276 * @return
15277 *   0 on success, otherwise negative errno value.
15278 */
15279static int
15280__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
15281                             struct rte_flow_error *error)
15282{
15283        struct mlx5_priv *priv = dev->data->dev_private;
15284        struct mlx5_shared_action_rss *shared_rss =
15285            mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15286        uint32_t old_refcnt = 1;
15287        int remaining;
15288
15289        if (!shared_rss)
15290                return rte_flow_error_set(error, EINVAL,
15291                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15292                                          "invalid shared action");
15293        if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
15294                                         0, 0, __ATOMIC_ACQUIRE,
15295                                         __ATOMIC_RELAXED))
15296                return rte_flow_error_set(error, EBUSY,
15297                                          RTE_FLOW_ERROR_TYPE_ACTION,
15298                                          NULL,
15299                                          "shared rss has references");
15300        remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
15301        if (remaining)
15302                return rte_flow_error_set(error, EBUSY,
15303                                          RTE_FLOW_ERROR_TYPE_ACTION,
15304                                          NULL,
15305                                          "shared rss hrxq has references");
15306        remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
15307                                               !!dev->data->dev_started);
15308        if (remaining)
15309                return rte_flow_error_set(error, EBUSY,
15310                                          RTE_FLOW_ERROR_TYPE_ACTION,
15311                                          NULL,
15312                                          "shared rss indirection table has"
15313                                          " references");
15314        rte_spinlock_lock(&priv->shared_act_sl);
15315        ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
15316                     &priv->rss_shared_actions, idx, shared_rss, next);
15317        rte_spinlock_unlock(&priv->shared_act_sl);
15318        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
15319                        idx);
15320        return 0;
15321}
15322
15323/**
15324 * Create an indirect action, lock free
15325 * (mutex should be acquired by the caller).
15326 * Dispatcher for action type specific call.
15327 *
15328 * @param[in] dev
15329 *   Pointer to the Ethernet device structure.
15330 * @param[in] conf
15331 *   Shared action configuration.
15332 * @param[in] action
15333 *   Action specification used to create indirect action.
15334 * @param[out] error
15335 *   Perform verbose error reporting if not NULL. Initialized in case of
15336 *   error only.
15337 *
15338 * @return
15339 *   A valid shared action handle in case of success, NULL otherwise and
15340 *   rte_errno is set.
15341 */
15342struct rte_flow_action_handle *
15343flow_dv_action_create(struct rte_eth_dev *dev,
15344                      const struct rte_flow_indir_action_conf *conf,
15345                      const struct rte_flow_action *action,
15346                      struct rte_flow_error *err)
15347{
15348        struct mlx5_priv *priv = dev->data->dev_private;
15349        uint32_t age_idx = 0;
15350        uint32_t idx = 0;
15351        uint32_t ret = 0;
15352
15353        switch (action->type) {
15354        case RTE_FLOW_ACTION_TYPE_RSS:
15355                ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
15356                idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
15357                       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
15358                break;
15359        case RTE_FLOW_ACTION_TYPE_AGE:
15360                age_idx = flow_dv_aso_age_alloc(dev, err);
15361                if (!age_idx) {
15362                        ret = -rte_errno;
15363                        break;
15364                }
15365                idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
15366                       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
15367                {
15368                        const struct rte_flow_action_age *age_conf =
15369                                                        action->conf;
15370                        flow_dv_aso_age_params_init(dev, age_idx,
15371                                        age_conf->context ? age_conf->context :
15372                                        (void *)(uintptr_t)idx,
15373                                        age_conf->timeout);
15374                }
15375                ret = age_idx;
15376                break;
15377        case RTE_FLOW_ACTION_TYPE_COUNT:
15378                ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
15379                idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
15380                       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
15381                break;
15382        case RTE_FLOW_ACTION_TYPE_CONNTRACK:
15383                ret = flow_dv_translate_create_conntrack(dev, action->conf,
15384                                                         err);
15385                idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
15386                break;
15387        default:
15388                rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
15389                                   NULL, "action type not supported");
15390                break;
15391        }
15392        return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
15393}
15394
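/*
 * Illustrative note (not part of the driver API): the handle returned by
 * flow_dv_action_create() is a plain integer that packs the action type in
 * the high bits and the type-specific index in the low bits, e.g.:
 *
 *	idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
 *	       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | rss_idx;
 *	handle = (struct rte_flow_action_handle *)(uintptr_t)idx;
 *
 * Here rss_idx stands for the index returned by __flow_dv_action_rss_create().
 * flow_dv_action_destroy() below reverses this packing to dispatch on the
 * action type.
 */
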
15395/**
15396 * Destroy the indirect action.
15397 * Release action related resources on the NIC and the memory.
15398 * Lock free, (mutex should be acquired by caller).
15399 * Dispatcher for action type specific call.
15400 *
15401 * @param[in] dev
15402 *   Pointer to the Ethernet device structure.
15403 * @param[in] handle
15404 *   The indirect action object handle to be removed.
15405 * @param[out] error
15406 *   Perform verbose error reporting if not NULL. Initialized in case of
15407 *   error only.
15408 *
15409 * @return
15410 *   0 on success, otherwise negative errno value.
15411 */
15412int
15413flow_dv_action_destroy(struct rte_eth_dev *dev,
15414                       struct rte_flow_action_handle *handle,
15415                       struct rte_flow_error *error)
15416{
15417        uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15418        uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15419        uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15420        struct mlx5_flow_counter *cnt;
15421        uint32_t no_flow_refcnt = 1;
15422        int ret;
15423
15424        switch (type) {
15425        case MLX5_INDIRECT_ACTION_TYPE_RSS:
15426                return __flow_dv_action_rss_release(dev, idx, error);
15427        case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15428                cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
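                /*
                 * The expected value 1 is the creator's own reference; the
                 * CAS writes back the same value and merely checks that no
                 * flow still references the counter.
                 */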
15429                if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
15430                                                 &no_flow_refcnt, 1, false,
15431                                                 __ATOMIC_ACQUIRE,
15432                                                 __ATOMIC_RELAXED))
15433                        return rte_flow_error_set(error, EBUSY,
15434                                                  RTE_FLOW_ERROR_TYPE_ACTION,
15435                                                  NULL,
15436                                                  "Indirect count action has references");
15437                flow_dv_counter_free(dev, idx);
15438                return 0;
15439        case MLX5_INDIRECT_ACTION_TYPE_AGE:
15440                ret = flow_dv_aso_age_release(dev, idx);
15441                if (ret)
15442                        /*
15443                         * In this case, the last flow holding a reference
15444                         * will actually release the age action.
15445                         */
15446                        DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
15447                                " released with references %d.", idx, ret);
15448                return 0;
15449        case MLX5_INDIRECT_ACTION_TYPE_CT:
15450                ret = flow_dv_aso_ct_release(dev, idx, error);
15451                if (ret < 0)
15452                        return ret;
15453                if (ret > 0)
15454                        DRV_LOG(DEBUG, "Connection tracking object %u still "
15455                                "has references %d.", idx, ret);
15456                return 0;
15457        default:
15458                return rte_flow_error_set(error, ENOTSUP,
15459                                          RTE_FLOW_ERROR_TYPE_ACTION,
15460                                          NULL,
15461                                          "action type not supported");
15462        }
15463}
15464
15465/**
15466 * Updates in place shared RSS action configuration.
15467 *
15468 * @param[in] dev
15469 *   Pointer to the Ethernet device structure.
15470 * @param[in] idx
15471 *   The shared RSS action object ID to be updated.
15472 * @param[in] action_conf
15473 *   RSS action specification used to modify *shared_rss*.
15474 * @param[out] error
15475 *   Perform verbose error reporting if not NULL. Initialized in case of
15476 *   error only.
15477 *
15478 * @return
15479 *   0 on success, otherwise negative errno value.
15480 * @note Currently only the update of RSS queues is supported.
15481 */
15482static int
15483__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
15484                            const struct rte_flow_action_rss *action_conf,
15485                            struct rte_flow_error *error)
15486{
15487        struct mlx5_priv *priv = dev->data->dev_private;
15488        struct mlx5_shared_action_rss *shared_rss =
15489            mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15490        int ret = 0;
15491        void *queue = NULL;
15492        void *queue_i = NULL;
15493        uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
15494        bool dev_started = !!dev->data->dev_started;
15495
15496        if (!shared_rss)
15497                return rte_flow_error_set(error, EINVAL,
15498                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15499                                          "invalid shared action to update");
15500        if (priv->obj_ops.ind_table_modify == NULL)
15501                return rte_flow_error_set(error, ENOTSUP,
15502                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15503                                          "cannot modify indirection table");
15504        queue = mlx5_malloc(MLX5_MEM_ZERO,
15505                            RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
15506                            0, SOCKET_ID_ANY);
15507        if (!queue)
15508                return rte_flow_error_set(error, ENOMEM,
15509                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15510                                          NULL,
15511                                          "cannot allocate resource memory");
15512        memcpy(queue, action_conf->queue, queue_size);
15513        MLX5_ASSERT(shared_rss->ind_tbl);
15514        rte_spinlock_lock(&shared_rss->action_rss_sl);
15515        queue_i = shared_rss->ind_tbl->queues;
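        /*
         * Modify using a temporary queue array; on success the new queues
         * are copied back into the table's own buffer and the internal
         * pointer is restored, so the temporary array can always be freed.
         */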
15516        ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
15517                                        queue, action_conf->queue_num,
15518                                        true /* standalone */,
15519                                        dev_started /* ref_new_qs */,
15520                                        dev_started /* deref_old_qs */);
15521        if (ret) {
15522                ret = rte_flow_error_set(error, rte_errno,
15523                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15524                                          "cannot update indirection table");
15525        } else {
15526                /* Restore the queues into the indirection table's own buffer. */
15527                memcpy(queue_i, queue, queue_size);
15528                shared_rss->ind_tbl->queues = queue_i;
15529                shared_rss->origin.queue_num = action_conf->queue_num;
15530        }
15531        mlx5_free(queue);
15532        rte_spinlock_unlock(&shared_rss->action_rss_sl);
15533        return ret;
15534}
15535
15536/**
15537 * Updates in place conntrack context or direction.
15538 * Context update should be synchronized.
15539 *
15540 * @param[in] dev
15541 *   Pointer to the Ethernet device structure.
15542 * @param[in] idx
15543 *   The conntrack object ID to be updated.
15544 * @param[in] update
15545 *   Pointer to the structure of information to update.
15546 * @param[out] error
15547 *   Perform verbose error reporting if not NULL. Initialized in case of
15548 *   error only.
15549 *
15550 * @return
15551 *   0 on success, otherwise negative errno value.
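 *
 * Usage sketch (illustrative only; new_profile is a placeholder):
 * @code
 * struct rte_flow_modify_conntrack mod = {
 *         .new_ct = new_profile, // new CT profile to apply
 *         .state = 1,            // update the context via ASO WQE
 *         .direction = 0,        // keep the current direction
 * };
 * @endcode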
15552 */
15553static int
15554__flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
15555                           const struct rte_flow_modify_conntrack *update,
15556                           struct rte_flow_error *error)
15557{
15558        struct mlx5_priv *priv = dev->data->dev_private;
15559        struct mlx5_aso_ct_action *ct;
15560        const struct rte_flow_action_conntrack *new_prf;
15561        int ret = 0;
15562        uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15563        uint32_t dev_idx;
15564
15565        if (PORT_ID(priv) != owner)
15566                return rte_flow_error_set(error, EACCES,
15567                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15568                                          NULL,
15569                                          "CT object owned by another port");
15570        dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15571        ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15572        if (!ct->refcnt)
15573                return rte_flow_error_set(error, ENOMEM,
15574                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15575                                          NULL,
15576                                          "CT object is inactive");
15577        new_prf = &update->new_ct;
15578        if (update->direction)
15579                ct->is_original = !!new_prf->is_original_dir;
15580        if (update->state) {
15581                /* Only validate the profile when it needs to be updated. */
15582                ret = mlx5_validate_action_ct(dev, new_prf, error);
15583                if (ret)
15584                        return ret;
15585                ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
15586                if (ret)
15587                        return rte_flow_error_set(error, EIO,
15588                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15589                                        NULL,
15590                                        "Failed to send CT context update WQE");
15591                /* Block until ready or a failure. */
15592                ret = mlx5_aso_ct_available(priv->sh, ct);
15593                if (ret)
15594                        rte_flow_error_set(error, rte_errno,
15595                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15596                                           NULL,
15597                                           "Timed out waiting for the CT update");
15598        }
15599        return ret;
15600}
15601
15602/**
15603 * Updates in place shared action configuration, lock free,
15604 * (mutex should be acquired by caller).
15605 *
15606 * @param[in] dev
15607 *   Pointer to the Ethernet device structure.
15608 * @param[in] handle
15609 *   The indirect action object handle to be updated.
15610 * @param[in] update
15611 *   Action specification used to modify the action pointed to by *handle*.
15612 *   *update* may be of the same type as the action pointed to by *handle*,
15613 *   or some other structure, e.g. a wrapper, depending on the indirect
15614 *   action type.
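 *   For example, per the dispatch below, an RSS update passes a complete
 *   struct rte_flow_action whose conf points to a struct
 *   rte_flow_action_rss, while a conntrack update passes a
 *   struct rte_flow_modify_conntrack directly.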
15615 * @param[out] err
15616 *   Perform verbose error reporting if not NULL. Initialized in case of
15617 *   error only.
15618 *
15619 * @return
15620 *   0 on success, otherwise negative errno value.
15621 */
15622int
15623flow_dv_action_update(struct rte_eth_dev *dev,
15624                        struct rte_flow_action_handle *handle,
15625                        const void *update,
15626                        struct rte_flow_error *err)
15627{
15628        uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15629        uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15630        uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15631        const void *action_conf;
15632
15633        switch (type) {
15634        case MLX5_INDIRECT_ACTION_TYPE_RSS:
15635                action_conf = ((const struct rte_flow_action *)update)->conf;
15636                return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15637        case MLX5_INDIRECT_ACTION_TYPE_CT:
15638                return __flow_dv_action_ct_update(dev, idx, update, err);
15639        default:
15640                return rte_flow_error_set(err, ENOTSUP,
15641                                          RTE_FLOW_ERROR_TYPE_ACTION,
15642                                          NULL,
15643                                          "action type update not supported");
15644        }
15645}
15646
15647/**
15648 * Destroy the meter sub policy table rules.
15649 * Lock free, (mutex should be acquired by caller).
15650 *
15651 * @param[in] dev
15652 *   Pointer to Ethernet device.
15653 * @param[in] sub_policy
15654 *   Pointer to meter sub policy table.
15655 */
15656static void
15657__flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15658                             struct mlx5_flow_meter_sub_policy *sub_policy)
15659{
15660        struct mlx5_priv *priv = dev->data->dev_private;
15661        struct mlx5_flow_tbl_data_entry *tbl;
15662        struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15663        struct mlx5_flow_meter_info *next_fm;
15664        struct mlx5_sub_policy_color_rule *color_rule;
15665        void *tmp;
15666        uint32_t i;
15667
15668        for (i = 0; i < RTE_COLORS; i++) {
15669                next_fm = NULL;
15670                if (i <= RTE_COLOR_YELLOW && policy &&
15671                    policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15672                        next_fm = mlx5_flow_meter_find(priv,
15673                                        policy->act_cnt[i].next_mtr_id, NULL);
15674                RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15675                                   next_port, tmp) {
15676                        claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15677                        tbl = container_of(color_rule->matcher->tbl,
15678                                           typeof(*tbl), tbl);
15679                        mlx5_list_unregister(tbl->matchers,
15680                                             &color_rule->matcher->entry);
15681                        TAILQ_REMOVE(&sub_policy->color_rules[i],
15682                                     color_rule, next_port);
15683                        mlx5_free(color_rule);
15684                        if (next_fm)
15685                                mlx5_flow_meter_detach(priv, next_fm);
15686                }
15687        }
15688        for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15689                if (sub_policy->rix_hrxq[i]) {
15690                        if (policy && !policy->is_hierarchy)
15691                                mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15692                        sub_policy->rix_hrxq[i] = 0;
15693                }
15694                if (sub_policy->jump_tbl[i]) {
15695                        flow_dv_tbl_resource_release(MLX5_SH(dev),
15696                                                     sub_policy->jump_tbl[i]);
15697                        sub_policy->jump_tbl[i] = NULL;
15698                }
15699        }
15700        if (sub_policy->tbl_rsc) {
15701                flow_dv_tbl_resource_release(MLX5_SH(dev),
15702                                             sub_policy->tbl_rsc);
15703                sub_policy->tbl_rsc = NULL;
15704        }
15705}
15706
15707/**
15708 * Destroy policy rules, lock free,
15709 * (mutex should be acquired by caller).
15710 * Iterates over all domains and their sub-policy tables.
15711 *
15712 * @param[in] dev
15713 *   Pointer to the Ethernet device structure.
15714 * @param[in] mtr_policy
15715 *   Meter policy struct.
15716 */
15717static void
15718flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15719                             struct mlx5_flow_meter_policy *mtr_policy)
15720{
15721        uint32_t i, j;
15722        struct mlx5_flow_meter_sub_policy *sub_policy;
15723        uint16_t sub_policy_num;
15724
15725        for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
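                /*
                 * sub_policy_num packs a per-domain counter into a single
                 * word; extract the field belonging to domain i.
                 */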
15726                sub_policy_num = (mtr_policy->sub_policy_num >>
15727                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15728                        MLX5_MTR_SUB_POLICY_NUM_MASK;
15729                for (j = 0; j < sub_policy_num; j++) {
15730                        sub_policy = mtr_policy->sub_policys[i][j];
15731                        if (sub_policy)
15732                                __flow_dv_destroy_sub_policy_rules(dev,
15733                                                                   sub_policy);
15734                }
15735        }
15736}
15737
15738/**
15739 * Destroy policy action, lock free,
15740 * (mutex should be acquired by caller).
15741 * Dispatcher for action type specific call.
15742 *
15743 * @param[in] dev
15744 *   Pointer to the Ethernet device structure.
15745 * @param[in] mtr_policy
15746 *   Meter policy struct.
15747 */
15748static void
15749flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15750                      struct mlx5_flow_meter_policy *mtr_policy)
15751{
15752        struct rte_flow_action *rss_action;
15753        struct mlx5_flow_handle dev_handle;
15754        uint32_t i, j;
15755
15756        for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15757                if (mtr_policy->act_cnt[i].rix_mark) {
15758                        flow_dv_tag_release(dev,
15759                                mtr_policy->act_cnt[i].rix_mark);
15760                        mtr_policy->act_cnt[i].rix_mark = 0;
15761                }
15762                if (mtr_policy->act_cnt[i].modify_hdr) {
15763                        dev_handle.dvh.modify_hdr =
15764                                mtr_policy->act_cnt[i].modify_hdr;
15765                        flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15766                }
15767                switch (mtr_policy->act_cnt[i].fate_action) {
15768                case MLX5_FLOW_FATE_SHARED_RSS:
15769                        rss_action = mtr_policy->act_cnt[i].rss;
15770                        mlx5_free(rss_action);
15771                        break;
15772                case MLX5_FLOW_FATE_PORT_ID:
15773                        if (mtr_policy->act_cnt[i].rix_port_id_action) {
15774                                flow_dv_port_id_action_resource_release(dev,
15775                                mtr_policy->act_cnt[i].rix_port_id_action);
15776                                mtr_policy->act_cnt[i].rix_port_id_action = 0;
15777                        }
15778                        break;
15779                case MLX5_FLOW_FATE_DROP:
15780                case MLX5_FLOW_FATE_JUMP:
15781                        for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15782                                mtr_policy->act_cnt[i].dr_jump_action[j] =
15783                                                NULL;
15784                        break;
15785                default:
15786                        /* Queue action: nothing to release. */
15787                        break;
15788                }
15789        }
15790        for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15791                mtr_policy->dr_drop_action[j] = NULL;
15792}
15793
15794/**
15795 * Create the yellow meter action for a color-aware meter.
15796 *
15797 * @param[in] dev
15798 *   Pointer to the Ethernet device structure.
15799 * @param[in] fm
15800 *   Meter information table.
15801 * @param[out] error
15802 *   Perform verbose error reporting if not NULL. Initialized in case of
15803 *   error only.
15804 *
15805 * @return
15806 *   0 on success, a negative errno value otherwise and rte_errno is set.
15807 */
15808static int
15809__flow_dv_create_mtr_yellow_action(struct rte_eth_dev *dev,
15810                                   struct mlx5_flow_meter_info *fm,
15811                                   struct rte_mtr_error *error)
15812{
15813#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
15814        struct mlx5_priv *priv = dev->data->dev_private;
15815        struct rte_flow_error flow_err;
15816        struct mlx5_aso_mtr *aso_mtr;
15817        struct mlx5_aso_mtr_pool *pool;
15818        uint8_t reg_id;
15819
15820        aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
15821        pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool, mtrs[aso_mtr->offset]);
15822        reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
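        /*
         * Create an ASO flow action on the meter color register: only the
         * YELLOW color bit is selected and the register is passed as an
         * offset from REG_C_0.
         */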
15823        fm->meter_action_y =
15824                mlx5_glue->dv_create_flow_action_aso(priv->sh->rx_domain,
15825                                                     pool->devx_obj->obj,
15826                                                     aso_mtr->offset,
15827                                                     (1 << MLX5_FLOW_COLOR_YELLOW),
15828                                                     reg_id - REG_C_0);
15829#else
15830        RTE_SET_USED(dev);
15831#endif
15832        if (!fm->meter_action_y)
15833                return -rte_mtr_error_set(error, EINVAL,
15834                                          RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15835                                          "Failed to create yellow meter action.");
15836        return 0;
15837}
15838
15839/**
15840 * Create policy action per domain, lock free,
15841 * (mutex should be acquired by caller).
15842 * Dispatcher for action type specific call.
15843 *
15844 * @param[in] dev
15845 *   Pointer to the Ethernet device structure.
15846 * @param[in] mtr_policy
15847 *   Meter policy struct.
15848 * @param[in] actions
15849 *   Per-color array of action specifications used to create meter actions.
15850 * @param[in] attr
15851 *   Pointer to the flow attributes.
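 * @param[in] domain
 *   The meter domain for which the actions are created.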
15852 * @param[out] error
15853 *   Perform verbose error reporting if not NULL. Initialized in case of
15854 *   error only.
15855 *
15856 * @return
15857 *   0 on success, otherwise negative errno value.
15858 */
15859static int
15860__flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15861                        struct mlx5_flow_meter_policy *mtr_policy,
15862                        const struct rte_flow_action *actions[RTE_COLORS],
15863                        struct rte_flow_attr *attr,
15864                        enum mlx5_meter_domain domain,
15865                        struct rte_mtr_error *error)
15866{
15867        struct mlx5_priv *priv = dev->data->dev_private;
15868        struct rte_flow_error flow_err;
15869        const struct rte_flow_action *act;
15870        uint64_t action_flags;
15871        struct mlx5_flow_handle dh;
15872        struct mlx5_flow dev_flow;
15873        struct mlx5_flow_dv_port_id_action_resource port_id_action;
15874        int i, ret;
15875        uint8_t egress, transfer;
15876        struct mlx5_meter_policy_action_container *act_cnt = NULL;
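        /*
         * Reserve stack room for a modify-header resource large enough to
         * hold the maximum number of modification commands.
         */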
15877        union {
15878                struct mlx5_flow_dv_modify_hdr_resource res;
15879                uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15880                            sizeof(struct mlx5_modification_cmd) *
15881                            (MLX5_MAX_MODIFY_NUM + 1)];
15882        } mhdr_dummy;
15883        struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15884
15885        egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15886        transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15887        memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15888        memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15889        memset(&port_id_action, 0,
15890               sizeof(struct mlx5_flow_dv_port_id_action_resource));
15891        memset(mhdr_res, 0, sizeof(*mhdr_res));
15892        mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15893                                       (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15894                                        MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15895        dev_flow.handle = &dh;
15896        dev_flow.dv.port_id_action = &port_id_action;
15897        dev_flow.external = true;
15898        for (i = 0; i < RTE_COLORS; i++) {
15899                if (i < MLX5_MTR_RTE_COLORS)
15900                        act_cnt = &mtr_policy->act_cnt[i];
15901                /* Skip the color policy action creation. */
15902                if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15903                    (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15904                        continue;
15905                action_flags = 0;
15906                for (act = actions[i];
15907                     act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15908                        switch (act->type) {
15909                        case RTE_FLOW_ACTION_TYPE_MARK:
15910                        {
15911                                uint32_t tag_be = mlx5_flow_mark_set
15912                                        (((const struct rte_flow_action_mark *)
15913                                        (act->conf))->id);
15914
15915                                if (i >= MLX5_MTR_RTE_COLORS)
15916                                        return -rte_mtr_error_set(error,
15917                                          ENOTSUP,
15918                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
15919                                          NULL,
15920                                          "cannot create policy "
15921                                          "mark action for this color");
15922                                if (flow_dv_tag_resource_register(dev, tag_be,
15923                                                  &dev_flow, &flow_err))
15924                                        return -rte_mtr_error_set(error,
15925                                        ENOTSUP,
15926                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
15927                                        NULL,
15928                                        "cannot setup policy mark action");
15929                                MLX5_ASSERT(dev_flow.dv.tag_resource);
15930                                act_cnt->rix_mark =
15931                                        dev_flow.handle->dvh.rix_tag;
15932                                action_flags |= MLX5_FLOW_ACTION_MARK;
15933                                mtr_policy->mark = 1;
15934                                break;
15935                        }
15936                        case RTE_FLOW_ACTION_TYPE_SET_TAG:
15937                                if (i >= MLX5_MTR_RTE_COLORS)
15938                                        return -rte_mtr_error_set(error,
15939                                          ENOTSUP,
15940                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
15941                                          NULL,
15942                                          "cannot create policy "
15943                                          "set tag action for this color");
15944                                if (flow_dv_convert_action_set_tag
15945                                (dev, mhdr_res,
15946                                (const struct rte_flow_action_set_tag *)
15947                                act->conf,  &flow_err))
15948                                        return -rte_mtr_error_set(error,
15949                                        ENOTSUP,
15950                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
15951                                        NULL, "cannot convert policy "
15952                                        "set tag action");
15953                                if (!mhdr_res->actions_num)
15954                                        return -rte_mtr_error_set(error,
15955                                        ENOTSUP,
15956                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
15957                                        NULL, "cannot find policy "
15958                                        "set tag action");
15959                                action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15960                                break;
15961                        case RTE_FLOW_ACTION_TYPE_DROP:
15962                        {
15963                                struct mlx5_flow_mtr_mng *mtrmng =
15964                                                priv->sh->mtrmng;
15965                                struct mlx5_flow_tbl_data_entry *tbl_data;
15966
15967                                /*
15968                                 * Create the drop table with
15969                                 * METER DROP level.
15970                                 */
15971                                if (!mtrmng->drop_tbl[domain]) {
15972                                        mtrmng->drop_tbl[domain] =
15973                                        flow_dv_tbl_resource_get(dev,
15974                                        MLX5_FLOW_TABLE_LEVEL_METER,
15975                                        egress, transfer, false, NULL, 0,
15976                                        0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15977                                        if (!mtrmng->drop_tbl[domain])
15978                                                return -rte_mtr_error_set
15979                                        (error, ENOTSUP,
15980                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
15981                                        NULL,
15982                                        "Failed to create meter drop table");
15983                                }
15984                                tbl_data = container_of
15985                                (mtrmng->drop_tbl[domain],
15986                                struct mlx5_flow_tbl_data_entry, tbl);
15987                                if (i < MLX5_MTR_RTE_COLORS) {
15988                                        act_cnt->dr_jump_action[domain] =
15989                                                tbl_data->jump.action;
15990                                        act_cnt->fate_action =
15991                                                MLX5_FLOW_FATE_DROP;
15992                                }
15993                                if (i == RTE_COLOR_RED)
15994                                        mtr_policy->dr_drop_action[domain] =
15995                                                tbl_data->jump.action;
15996                                action_flags |= MLX5_FLOW_ACTION_DROP;
15997                                break;
15998                        }
15999                        case RTE_FLOW_ACTION_TYPE_QUEUE:
16000                        {
16001                                if (i >= MLX5_MTR_RTE_COLORS)
16002                                        return -rte_mtr_error_set(error,
16003                                        ENOTSUP,
16004                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
16005                                        NULL, "cannot create policy "
16006                                        "fate queue for this color");
16007                                act_cnt->queue =
16008                                ((const struct rte_flow_action_queue *)
16009                                        (act->conf))->index;
16010                                act_cnt->fate_action =
16011                                        MLX5_FLOW_FATE_QUEUE;
16012                                dev_flow.handle->fate_action =
16013                                        MLX5_FLOW_FATE_QUEUE;
16014                                mtr_policy->is_queue = 1;
16015                                action_flags |= MLX5_FLOW_ACTION_QUEUE;
16016                                break;
16017                        }
16018                        case RTE_FLOW_ACTION_TYPE_RSS:
16019                        {
16020                                int rss_size;
16021
16022                                if (i >= MLX5_MTR_RTE_COLORS)
16023                                        return -rte_mtr_error_set(error,
16024                                          ENOTSUP,
16025                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
16026                                          NULL,
16027                                          "cannot create policy "
16028                                          "rss action for this color");
16029                                /*
16030                                 * Save RSS conf into policy struct
16031                                 * for translate stage.
16032                                 */
16033                                rss_size = (int)rte_flow_conv
16034                                        (RTE_FLOW_CONV_OP_ACTION,
16035                                        NULL, 0, act, &flow_err);
16036                                if (rss_size <= 0)
16037                                        return -rte_mtr_error_set(error,
16038                                          ENOTSUP,
16039                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
16040                                          NULL, "Got wrong RSS "
16041                                          "action struct size");
16042                                act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
16043                                                rss_size, 0, SOCKET_ID_ANY);
16044                                if (!act_cnt->rss)
16045                                        return -rte_mtr_error_set(error,
16046                                          ENOTSUP,
16047                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
16048                                          NULL,
16049                                          "Failed to allocate RSS action memory");
16050                                ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
16051                                        act_cnt->rss, rss_size,
16052                                        act, &flow_err);
16053                                if (ret < 0)
16054                                        return -rte_mtr_error_set(error,
16055                                          ENOTSUP,
16056                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
16057                                          NULL, "Failed to save "
16058                                          "RSS action into policy struct");
16059                                act_cnt->fate_action =
16060                                        MLX5_FLOW_FATE_SHARED_RSS;
16061                                action_flags |= MLX5_FLOW_ACTION_RSS;
16062                                break;
16063                        }
16064                        case RTE_FLOW_ACTION_TYPE_PORT_ID:
16065                        case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
16066                        {
16067                                struct mlx5_flow_dv_port_id_action_resource
16068                                        port_id_resource;
16069                                uint32_t port_id = 0;
16070
16071                                if (i >= MLX5_MTR_RTE_COLORS)
16072                                        return -rte_mtr_error_set(error,
16073                                        ENOTSUP,
16074                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
16075                                        NULL, "cannot create policy "
16076                                        "port action for this color");
16077                                memset(&port_id_resource, 0,
16078                                        sizeof(port_id_resource));
16079                                if (flow_dv_translate_action_port_id(dev, act,
16080                                                &port_id, &flow_err))
16081                                        return -rte_mtr_error_set(error,
16082                                        ENOTSUP,
16083                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
16084                                        NULL, "cannot translate "
16085                                        "policy port action");
16086                                port_id_resource.port_id = port_id;
16087                                if (flow_dv_port_id_action_resource_register
16088                                        (dev, &port_id_resource,
16089                                        &dev_flow, &flow_err))
16090                                        return -rte_mtr_error_set(error,
16091                                        ENOTSUP,
16092                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
16093                                        NULL, "cannot setup "
16094                                        "policy port action");
16095                                act_cnt->rix_port_id_action =
16096                                        dev_flow.handle->rix_port_id_action;
16097                                act_cnt->fate_action =
16098                                        MLX5_FLOW_FATE_PORT_ID;
16099                                action_flags |= MLX5_FLOW_ACTION_PORT_ID;
16100                                break;
16101                        }
16102                        case RTE_FLOW_ACTION_TYPE_JUMP:
16103                        {
16104                                uint32_t jump_group = 0;
16105                                uint32_t table = 0;
16106                                struct mlx5_flow_tbl_data_entry *tbl_data;
16107                                struct flow_grp_info grp_info = {
16108                                        .external = !!dev_flow.external,
16109                                        .transfer = !!transfer,
16110                                        .fdb_def_rule = !!priv->fdb_def_rule,
16111                                        .std_tbl_fix = 0,
16112                                        .skip_scale = dev_flow.skip_scale &
16113                                        (1 << MLX5_SCALE_FLOW_GROUP_BIT),
16114                                };
16115                                struct mlx5_flow_meter_sub_policy *sub_policy =
16116                                        mtr_policy->sub_policys[domain][0];
16117
16118                                if (i >= MLX5_MTR_RTE_COLORS)
16119                                        return -rte_mtr_error_set(error,
16120                                          ENOTSUP,
16121                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
16122                                          NULL,
16123                                          "cannot create policy "
16124                                          "jump action for this color");
16125                                jump_group =
16126                                ((const struct rte_flow_action_jump *)
16127                                                        act->conf)->group;
16128                                if (mlx5_flow_group_to_table(dev, NULL,
16129                                                       jump_group,
16130                                                       &table,
16131                                                       &grp_info, &flow_err))
16132                                        return -rte_mtr_error_set(error,
16133                                        ENOTSUP,
16134                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
16135                                        NULL, "cannot setup "
16136                                        "policy jump action");
16137                                sub_policy->jump_tbl[i] =
16138                                flow_dv_tbl_resource_get(dev,
16139                                        table, egress,
16140                                        transfer,
16141                                        !!dev_flow.external,
16142                                        NULL, jump_group, 0,
16143                                        0, &flow_err);
16144                                if (!sub_policy->jump_tbl[i])
16146                                        return -rte_mtr_error_set(error,
16147                                        ENOTSUP,
16148                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
16149                                        NULL, "cannot create jump action.");
16150                                tbl_data = container_of
16151                                (sub_policy->jump_tbl[i],
16152                                struct mlx5_flow_tbl_data_entry, tbl);
16153                                act_cnt->dr_jump_action[domain] =
16154                                        tbl_data->jump.action;
16155                                act_cnt->fate_action =
16156                                        MLX5_FLOW_FATE_JUMP;
16157                                action_flags |= MLX5_FLOW_ACTION_JUMP;
16158                                break;
16159                        }
16160                        case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
16161                        {
16162                                if (i >= MLX5_MTR_RTE_COLORS)
16163                                        return -rte_mtr_error_set(error,
16164                                          ENOTSUP,
16165                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
16166                                          NULL,
16167                                          "cannot create policy modify field for this color");
16168                                if (flow_dv_convert_action_modify_field
16169                                        (dev, mhdr_res, act, attr, &flow_err))
16170                                        return -rte_mtr_error_set(error,
16171                                        ENOTSUP,
16172                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
16173                                        NULL, "cannot setup policy modify field action");
16174                                if (!mhdr_res->actions_num)
16175                                        return -rte_mtr_error_set(error,
16176                                        ENOTSUP,
16177                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
16178                                        NULL, "cannot find policy modify field action");
16179                                action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
16180                                break;
16181                        }
16182                        /*
16183                         * No need to check the meter hierarchy for any
16184                         * color here since it is done at validation stage.
16185                         */
16186                        case RTE_FLOW_ACTION_TYPE_METER:
16187                        {
16188                                const struct rte_flow_action_meter *mtr;
16189                                struct mlx5_flow_meter_info *next_fm;
16190                                struct mlx5_flow_meter_policy *next_policy;
16191                                struct rte_flow_action tag_action;
16192                                struct mlx5_rte_flow_action_set_tag set_tag;
16193                                uint32_t next_mtr_idx = 0;
16194
16195                                mtr = act->conf;
16196                                next_fm = mlx5_flow_meter_find(priv,
16197                                                        mtr->mtr_id,
16198                                                        &next_mtr_idx);
16199                                if (!next_fm)
16200                                        return -rte_mtr_error_set(error, EINVAL,
16201                                                RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
16202                                                "Failed to find next meter.");
16203                                if (next_fm->def_policy)
16204                                        return -rte_mtr_error_set(error, EINVAL,
16205                                                RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
16206                                "Hierarchy only supports termination meter.");
16207                                next_policy = mlx5_flow_meter_policy_find(dev,
16208                                                next_fm->policy_id, NULL);
16209                                MLX5_ASSERT(next_policy);
16210                                if (next_fm->drop_cnt) {
16211                                        set_tag.id =
16212                                                (enum modify_reg)
16213                                                mlx5_flow_get_reg_id(dev,
16214                                                MLX5_MTR_ID,
16215                                                0,
16216                                                (struct rte_flow_error *)error);
16217                                        set_tag.offset = (priv->mtr_reg_share ?
16218                                                MLX5_MTR_COLOR_BITS : 0);
16219                                        set_tag.length = (priv->mtr_reg_share ?
16220                                               MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
16221                                               MLX5_REG_BITS);
16222                                        set_tag.data = next_mtr_idx;
16223                                        tag_action.type =
16224                                                (enum rte_flow_action_type)
16225                                                MLX5_RTE_FLOW_ACTION_TYPE_TAG;
16226                                        tag_action.conf = &set_tag;
16227                                        if (flow_dv_convert_action_set_reg
16228                                                (mhdr_res, &tag_action,
16229                                                (struct rte_flow_error *)error))
16230                                                return -rte_errno;
16231                                        action_flags |=
16232                                                MLX5_FLOW_ACTION_SET_TAG;
16233                                }
16234                                if (i == RTE_COLOR_YELLOW && next_fm->color_aware &&
16235                                    !next_fm->meter_action_y)
16236                                        if (__flow_dv_create_mtr_yellow_action(dev, next_fm, error))
16237                                                return -rte_errno;
16238                                act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
16239                                act_cnt->next_mtr_id = next_fm->meter_id;
16240                                act_cnt->next_sub_policy = NULL;
16241                                mtr_policy->is_hierarchy = 1;
16242                                mtr_policy->dev = next_policy->dev;
16243                                if (next_policy->mark)
16244                                        mtr_policy->mark = 1;
16245                                action_flags |=
16246                                MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
16247                                break;
16248                        }
16249                        default:
16250                                return -rte_mtr_error_set(error, ENOTSUP,
16251                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
16252                                          NULL, "action type not supported");
16253                        }
16254                        if ((action_flags & MLX5_FLOW_ACTION_SET_TAG) ||
16255                            (action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD)) {
16256                                /* Create the modify header action if needed. */
16257                                dev_flow.dv.group = 1;
16258                                if (flow_dv_modify_hdr_resource_register
16259                                        (dev, mhdr_res, &dev_flow, &flow_err))
16260                                        return -rte_mtr_error_set(error,
16261                                                ENOTSUP,
16262                                                RTE_MTR_ERROR_TYPE_METER_POLICY,
16263                                                NULL, "cannot register policy set tag/modify field action");
16264                                act_cnt->modify_hdr =
16265                                        dev_flow.handle->dvh.modify_hdr;
16266                        }
16267                }
16268        }
16269        return 0;
16270}
16271
16272/**
16273 * Create policy actions for all domains, lock free,
16274 * (mutex should be acquired by caller).
16275 * Calls the per-domain creation routine for each configured domain.
16276 *
16277 * @param[in] dev
16278 *   Pointer to the Ethernet device structure.
16279 * @param[in] mtr_policy
16280 *   Meter policy struct.
16281 * @param[in] actions
16282 *   Per-color array of action specifications used to create meter actions.
16283 * @param[in] attr
16284 *   Pointer to the flow attributes.
16285 * @param[out] error
16286 *   Perform verbose error reporting if not NULL. Initialized in case of
16287 *   error only.
16288 *
16289 * @return
16290 *   0 on success, otherwise negative errno value.
16291 */
16292static int
16293flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
16294                      struct mlx5_flow_meter_policy *mtr_policy,
16295                      const struct rte_flow_action *actions[RTE_COLORS],
16296                      struct rte_flow_attr *attr,
16297                      struct rte_mtr_error *error)
16298{
16299        int ret, i;
16300        uint16_t sub_policy_num;
16301
16302        for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16303                sub_policy_num = (mtr_policy->sub_policy_num >>
16304                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16305                        MLX5_MTR_SUB_POLICY_NUM_MASK;
16306                if (sub_policy_num) {
16307                        ret = __flow_dv_create_domain_policy_acts(dev,
16308                                mtr_policy, actions, attr,
16309                                (enum mlx5_meter_domain)i, error);
16310                        /* Resource cleanup is done at the caller level. */
16311                        if (ret)
16312                                return ret;
16313                }
16314        }
16315        return 0;
16316}
16317
16318/**
16319 * Query a DV flow rule for its statistics via DevX.
16320 *
16321 * @param[in] dev
16322 *   Pointer to Ethernet device.
16323 * @param[in] cnt_idx
16324 *   Index to the flow counter.
16325 * @param[out] data
16326 *   Data retrieved by the query.
16327 * @param[out] error
16328 *   Perform verbose error reporting if not NULL.
16329 *
16330 * @return
16331 *   0 on success, a negative errno value otherwise and rte_errno is set.
16332 */
16333static int
16334flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
16335                    struct rte_flow_error *error)
16336{
16337        struct mlx5_priv *priv = dev->data->dev_private;
16338        struct rte_flow_query_count *qc = data;
16339
16340        if (!priv->sh->cdev->config.devx)
16341                return rte_flow_error_set(error, ENOTSUP,
16342                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16343                                          NULL,
16344                                          "counters are not supported");
16345        if (cnt_idx) {
16346                uint64_t pkts, bytes;
16347                struct mlx5_flow_counter *cnt;
16348                int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
16349
16350                if (err)
16351                        return rte_flow_error_set(error, -err,
16352                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16353                                        NULL, "cannot read counters");
16354                cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
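                /*
                 * Report deltas relative to the values latched at the last
                 * reset; a query with reset latches the current readings.
                 */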
16355                qc->hits_set = 1;
16356                qc->bytes_set = 1;
16357                qc->hits = pkts - cnt->hits;
16358                qc->bytes = bytes - cnt->bytes;
16359                if (qc->reset) {
16360                        cnt->hits = pkts;
16361                        cnt->bytes = bytes;
16362                }
16363                return 0;
16364        }
16365        return rte_flow_error_set(error, EINVAL,
16366                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16367                                  NULL,
16368                                  "counters are not available");
16369}
16370
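/**
 * Query an indirect action, lock free,
 * (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] handle
 *   The indirect action object handle to be queried.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */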
16371int
16372flow_dv_action_query(struct rte_eth_dev *dev,
16373                     const struct rte_flow_action_handle *handle, void *data,
16374                     struct rte_flow_error *error)
16375{
16376        struct mlx5_age_param *age_param;
16377        struct rte_flow_query_age *resp;
16378        uint32_t act_idx = (uint32_t)(uintptr_t)handle;
16379        uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
16380        uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
16381        struct mlx5_priv *priv = dev->data->dev_private;
16382        struct mlx5_aso_ct_action *ct;
16383        uint16_t owner;
16384        uint32_t dev_idx;
16385
16386        switch (type) {
16387        case MLX5_INDIRECT_ACTION_TYPE_AGE:
16388                age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
16389                resp = data;
16390                resp->aged = __atomic_load_n(&age_param->state,
16391                                              __ATOMIC_RELAXED) == AGE_TMOUT ?
16392                                                                          1 : 0;
16393                resp->sec_since_last_hit_valid = !resp->aged;
16394                if (resp->sec_since_last_hit_valid)
16395                        resp->sec_since_last_hit = __atomic_load_n
16396                             (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16397                return 0;
16398        case MLX5_INDIRECT_ACTION_TYPE_COUNT:
16399                return flow_dv_query_count(dev, idx, data, error);
16400        case MLX5_INDIRECT_ACTION_TYPE_CT:
16401                owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
16402                if (owner != PORT_ID(priv))
16403                        return rte_flow_error_set(error, EACCES,
16404                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16405                                        NULL,
16406                                        "CT object owned by another port");
16407                dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
16408                ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
16409                MLX5_ASSERT(ct);
16410                if (!ct->refcnt)
16411                        return rte_flow_error_set(error, EFAULT,
16412                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16413                                        NULL,
16414                                        "CT object is inactive");
16415                ((struct rte_flow_action_conntrack *)data)->peer_port =
16416                                                        ct->peer;
16417                ((struct rte_flow_action_conntrack *)data)->is_original_dir =
16418                                                        ct->is_original;
16419                if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
16420                        return rte_flow_error_set(error, EIO,
16421                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16422                                        NULL,
16423                                        "Failed to query CT context");
16424                return 0;
16425        default:
16426                return rte_flow_error_set(error, ENOTSUP,
16427                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
16428                                          "action type query not supported");
16429        }
16430}
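
/*
 * Illustrative note on the handle layout decoded above; the exact
 * offset value is assumed here for the example (see
 * MLX5_INDIRECT_ACTION_TYPE_OFFSET in mlx5_flow.h). The type lives in
 * the top bits and the object index below it, e.g. with a 29-bit
 * offset:
 *
 *   act_idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT << 29) | 5;
 *   type = act_idx >> 29;                   // ..._TYPE_COUNT
 *   idx  = act_idx & ((1u << 29) - 1);      // 5
 *
 * For CT handles the low bits are split again into an owner port and a
 * per-device index by MLX5_INDIRECT_ACT_CT_GET_OWNER() and
 * MLX5_INDIRECT_ACT_CT_GET_IDX(), which is why the query checks the
 * owner before dereferencing the object.
 */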
16431
16432/**
16433 * Query a flow rule AGE action for aging information.
16434 *
16435 * @param[in] dev
16436 *   Pointer to Ethernet device.
16437 * @param[in] flow
16438 *   Pointer to the flow rule.
16439 * @param[out] data
16440 *   Data retrieved by the query.
16441 * @param[out] error
16442 *   Perform verbose error reporting if not NULL.
16443 *
16444 * @return
16445 *   0 on success, a negative errno value otherwise and rte_errno is set.
16446 */
16447static int
16448flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
16449                  void *data, struct rte_flow_error *error)
16450{
16451        struct rte_flow_query_age *resp = data;
16452        struct mlx5_age_param *age_param;
16453
16454        if (flow->age) {
16455                struct mlx5_aso_age_action *act =
16456                                     flow_aso_age_get_by_idx(dev, flow->age);
16457
16458                age_param = &act->age_params;
16459        } else if (flow->counter) {
16460                age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
16461
16462                if (!age_param || !age_param->timeout)
16463                        return rte_flow_error_set
16464                                        (error, EINVAL,
16465                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16466                                         NULL, "cannot read age data");
16467        } else {
16468                return rte_flow_error_set(error, EINVAL,
16469                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16470                                          NULL, "age data not available");
16471        }
16472        resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
16473                                     AGE_TMOUT ? 1 : 0;
16474        resp->sec_since_last_hit_valid = !resp->aged;
16475        if (resp->sec_since_last_hit_valid)
16476                resp->sec_since_last_hit = __atomic_load_n
16477                             (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16478        return 0;
16479}
16480
16481/**
16482 * Query a flow.
16483 *
16484 * @see rte_flow_query()
16485 * @see rte_flow_ops
16486 */
16487static int
16488flow_dv_query(struct rte_eth_dev *dev,
16489              struct rte_flow *flow,
16490              const struct rte_flow_action *actions,
16491              void *data,
16492              struct rte_flow_error *error)
16493{
16494        int ret = -EINVAL;
16495
16496        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
16497                switch (actions->type) {
16498                case RTE_FLOW_ACTION_TYPE_VOID:
16499                        break;
16500                case RTE_FLOW_ACTION_TYPE_COUNT:
16501                        ret = flow_dv_query_count(dev, flow->counter, data,
16502                                                  error);
16503                        break;
16504                case RTE_FLOW_ACTION_TYPE_AGE:
16505                        ret = flow_dv_query_age(dev, flow, data, error);
16506                        break;
16507                default:
16508                        return rte_flow_error_set(error, ENOTSUP,
16509                                                  RTE_FLOW_ERROR_TYPE_ACTION,
16510                                                  actions,
16511                                                  "action not supported");
16512                }
16513        }
16514        return ret;
16515}
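
/*
 * Illustrative usage sketch, application side; "port_id" and "flow"
 * are assumed to exist. The action array is END-terminated, as the
 * loop in flow_dv_query() above expects:
 *
 *   struct rte_flow_query_age resp;
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_AGE },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_query(port_id, flow, actions, &resp, &err) == 0 &&
 *       resp.sec_since_last_hit_valid)
 *           printf("idle for %u seconds\n", resp.sec_since_last_hit);
 */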
16516
16517/**
16518 * Destroy the meter table set.
16519 * Lock free, (mutex should be acquired by caller).
16520 *
16521 * @param[in] dev
16522 *   Pointer to Ethernet device.
16523 * @param[in] fm
16524 *   Meter information table.
16525 */
16526static void
16527flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
16528                        struct mlx5_flow_meter_info *fm)
16529{
16530        struct mlx5_priv *priv = dev->data->dev_private;
16531        int i;
16532
16533        if (!fm || !priv->sh->config.dv_flow_en)
16534                return;
16535        for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16536                if (fm->drop_rule[i]) {
16537                        claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16538                        fm->drop_rule[i] = NULL;
16539                }
16540        }
16541}
16542
16543static void
16544flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
16545{
16546        struct mlx5_priv *priv = dev->data->dev_private;
16547        struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16548        struct mlx5_flow_tbl_data_entry *tbl;
16549        int i, j;
16550
16551        for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16552                if (mtrmng->def_rule[i]) {
16553                        claim_zero(mlx5_flow_os_destroy_flow
16554                                        (mtrmng->def_rule[i]));
16555                        mtrmng->def_rule[i] = NULL;
16556                }
16557                if (mtrmng->def_matcher[i]) {
16558                        tbl = container_of(mtrmng->def_matcher[i]->tbl,
16559                                struct mlx5_flow_tbl_data_entry, tbl);
16560                        mlx5_list_unregister(tbl->matchers,
16561                                             &mtrmng->def_matcher[i]->entry);
16562                        mtrmng->def_matcher[i] = NULL;
16563                }
16564                for (j = 0; j < MLX5_REG_BITS; j++) {
16565                        if (mtrmng->drop_matcher[i][j]) {
16566                                tbl =
16567                                container_of(mtrmng->drop_matcher[i][j]->tbl,
16568                                             struct mlx5_flow_tbl_data_entry,
16569                                             tbl);
16570                                mlx5_list_unregister(tbl->matchers,
16571                                            &mtrmng->drop_matcher[i][j]->entry);
16572                                mtrmng->drop_matcher[i][j] = NULL;
16573                        }
16574                }
16575                if (mtrmng->drop_tbl[i]) {
16576                        flow_dv_tbl_resource_release(MLX5_SH(dev),
16577                                mtrmng->drop_tbl[i]);
16578                        mtrmng->drop_tbl[i] = NULL;
16579                }
16580        }
16581}
16582
16583/* Number of meter flow actions, count and jump or count and drop. */
16584#define METER_ACTIONS 2
16585
16586static void
16587__flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16588                                    enum mlx5_meter_domain domain)
16589{
16590        struct mlx5_priv *priv = dev->data->dev_private;
16591        struct mlx5_flow_meter_def_policy *def_policy =
16592                        priv->sh->mtrmng->def_policy[domain];
16593
16594        __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16595        mlx5_free(def_policy);
16596        priv->sh->mtrmng->def_policy[domain] = NULL;
16597}
16598
16599/**
16600 * Destroy the default policy table set.
16601 *
16602 * @param[in] dev
16603 *   Pointer to Ethernet device.
16604 */
16605static void
16606flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16607{
16608        struct mlx5_priv *priv = dev->data->dev_private;
16609        int i;
16610
16611        for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16612                if (priv->sh->mtrmng->def_policy[i])
16613                        __flow_dv_destroy_domain_def_policy(dev,
16614                                        (enum mlx5_meter_domain)i);
16615        priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16616}
16617
16618static int
16619__flow_dv_create_policy_flow(struct rte_eth_dev *dev,
16620                        uint32_t color_reg_c_idx,
16621                        enum rte_color color, void *matcher_object,
16622                        int actions_n, void *actions,
16623                        bool match_src_port, const struct rte_flow_item *item,
16624                        void **rule, const struct rte_flow_attr *attr)
16625{
16626        int ret;
16627        struct mlx5_flow_dv_match_params value = {
16628                .size = sizeof(value.buf),
16629        };
16630        struct mlx5_flow_dv_match_params matcher = {
16631                .size = sizeof(matcher.buf),
16632        };
16633        struct mlx5_priv *priv = dev->data->dev_private;
16634        uint8_t misc_mask;
16635
16636        if (match_src_port && priv->sh->esw_mode) {
16637                if (flow_dv_translate_item_port_id(dev, matcher.buf,
16638                                                   value.buf, item, attr)) {
16639                        DRV_LOG(ERR, "Failed to translate port item for"
16640                                " meter policy%d flow.", color);
16641                        return -1;
16642                }
16643        }
16644        flow_dv_match_meta_reg(matcher.buf, value.buf,
16645                               (enum modify_reg)color_reg_c_idx,
16646                               rte_col_2_mlx5_col(color), UINT32_MAX);
16647        misc_mask = flow_dv_matcher_enable(value.buf);
16648        __flow_dv_adjust_buf_size(&value.size, misc_mask);
16649        ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
16650                                       actions_n, actions, rule);
16651        if (ret) {
16652                DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
16653                return -1;
16654        }
16655        return 0;
16656}
16657
16658static int
16659__flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
16660                        uint32_t color_reg_c_idx,
16661                        uint16_t priority,
16662                        struct mlx5_flow_meter_sub_policy *sub_policy,
16663                        const struct rte_flow_attr *attr,
16664                        bool match_src_port,
16665                        const struct rte_flow_item *item,
16666                        struct mlx5_flow_dv_matcher **policy_matcher,
16667                        struct rte_flow_error *error)
16668{
16669        struct mlx5_list_entry *entry;
16670        struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
16671        struct mlx5_flow_dv_matcher matcher = {
16672                .mask = {
16673                        .size = sizeof(matcher.mask.buf),
16674                },
16675                .tbl = tbl_rsc,
16676        };
16677        struct mlx5_flow_dv_match_params value = {
16678                .size = sizeof(value.buf),
16679        };
16680        struct mlx5_flow_cb_ctx ctx = {
16681                .error = error,
16682                .data = &matcher,
16683        };
16684        struct mlx5_flow_tbl_data_entry *tbl_data;
16685        struct mlx5_priv *priv = dev->data->dev_private;
16686        const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
16687
16688        if (match_src_port && priv->sh->esw_mode) {
16689                if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
16690                                                   value.buf, item, attr)) {
16691                        DRV_LOG(ERR, "Failed to register meter policy%d matcher"
16692                                " with port.", priority);
16693                        return -1;
16694                }
16695        }
16696        tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
16697        if (priority < RTE_COLOR_RED)
16698                flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16699                        (enum modify_reg)color_reg_c_idx, 0, color_mask);
16700        matcher.priority = priority;
16701        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16702                                    matcher.mask.size);
16703        entry = mlx5_list_register(tbl_data->matchers, &ctx);
16704        if (!entry) {
16705                DRV_LOG(ERR, "Failed to register meter drop matcher.");
16706                return -1;
16707        }
16708        *policy_matcher =
16709                container_of(entry, struct mlx5_flow_dv_matcher, entry);
16710        return 0;
16711}
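
/*
 * Illustrative note; MLX5_MTR_COLOR_BITS == 3 is assumed for the
 * example. Besides the optional source-port match, the matcher
 * registered above masks only the low color bits of the selected REG_C
 * register:
 *
 *   mask  = (1u << 3) - 1;              // color_mask
 *   value = rte_col_2_mlx5_col(color);  // supplied per flow by
 *                                       // __flow_dv_create_policy_flow()
 *
 * As the code shows, matchers created with priority >= RTE_COLOR_RED
 * skip the color-register match entirely.
 */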
16712
16713/**
16714 * Create the policy rules per domain.
16715 *
16716 * @param[in] dev
16717 *   Pointer to Ethernet device.
16718 * @param[in] sub_policy
16719 *   Pointer to sub policy table.
16720 * @param[in] egress
16721 *   Direction of the table.
16722 * @param[in] transfer
16723 *   E-Switch or NIC flow.
 * @param[in] match_src_port
 *   Whether to match on the source port.
16724 * @param[in] acts
16725 *   Pointer to policy action list per color.
16726 *
16727 * @return
16728 *   0 on success, -1 otherwise.
16729 */
16730static int
16731__flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16732                struct mlx5_flow_meter_sub_policy *sub_policy,
16733                uint8_t egress, uint8_t transfer, bool match_src_port,
16734                struct mlx5_meter_policy_acts acts[RTE_COLORS])
16735{
16736        struct mlx5_priv *priv = dev->data->dev_private;
16737        struct rte_flow_error flow_err;
16738        uint32_t color_reg_c_idx;
16739        struct rte_flow_attr attr = {
16740                .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16741                .priority = 0,
16742                .ingress = 0,
16743                .egress = !!egress,
16744                .transfer = !!transfer,
16745                .reserved = 0,
16746        };
16747        int i;
16748        int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16749        struct mlx5_sub_policy_color_rule *color_rule;
16750        bool svport_match;
16751        struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16752
16753        if (ret < 0)
16754                return -1;
16755        /* Create policy table with POLICY level. */
16756        if (!sub_policy->tbl_rsc)
16757                sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16758                                MLX5_FLOW_TABLE_LEVEL_POLICY,
16759                                egress, transfer, false, NULL, 0, 0,
16760                                sub_policy->idx, &flow_err);
16761        if (!sub_policy->tbl_rsc) {
16762                DRV_LOG(ERR,
16763                        "Failed to create meter sub policy table.");
16764                return -1;
16765        }
16766        /* Prepare matchers. */
16767        color_reg_c_idx = ret;
16768        for (i = 0; i < RTE_COLORS; i++) {
16769                TAILQ_INIT(&sub_policy->color_rules[i]);
16770                if (!acts[i].actions_n)
16771                        continue;
16772                color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16773                                sizeof(struct mlx5_sub_policy_color_rule),
16774                                0, SOCKET_ID_ANY);
16775                if (!color_rule) {
16776                        DRV_LOG(ERR, "No memory to create color rule.");
16777                        goto err_exit;
16778                }
16779                tmp_rules[i] = color_rule;
16780                TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16781                                  color_rule, next_port);
16782                color_rule->src_port = priv->representor_id;
16783                /* Not used; the matcher priority is set explicitly below. */
16784                attr.priority = i;
16785                /* Create matchers for colors. */
16786                svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16787                if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16788                                MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16789                                &attr, svport_match, NULL,
16790                                &color_rule->matcher, &flow_err)) {
16791                        DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16792                        goto err_exit;
16793                }
16794                /* Create flow, matching color. */
16795                if (__flow_dv_create_policy_flow(dev,
16796                                color_reg_c_idx, (enum rte_color)i,
16797                                color_rule->matcher->matcher_object,
16798                                acts[i].actions_n, acts[i].dv_actions,
16799                                svport_match, NULL, &color_rule->rule,
16800                                &attr)) {
16801                        DRV_LOG(ERR, "Failed to create color%u rule.", i);
16802                        goto err_exit;
16803                }
16804        }
16805        return 0;
16806err_exit:
16807        /* All the policy rules will be cleared. */
16808        do {
16809                color_rule = tmp_rules[i];
16810                if (color_rule) {
16811                        if (color_rule->rule)
16812                                mlx5_flow_os_destroy_flow(color_rule->rule);
16813                        if (color_rule->matcher) {
16814                                struct mlx5_flow_tbl_data_entry *tbl =
16815                                        container_of(color_rule->matcher->tbl,
16816                                                     typeof(*tbl), tbl);
16817                                mlx5_list_unregister(tbl->matchers,
16818                                                &color_rule->matcher->entry);
16819                        }
16820                        TAILQ_REMOVE(&sub_policy->color_rules[i],
16821                                     color_rule, next_port);
16822                        mlx5_free(color_rule);
16823                }
16824        } while (i--);
16825        return -1;
16826}
16827
16828static int
16829__flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16830                        struct mlx5_flow_meter_policy *mtr_policy,
16831                        struct mlx5_flow_meter_sub_policy *sub_policy,
16832                        uint32_t domain)
16833{
16834        struct mlx5_priv *priv = dev->data->dev_private;
16835        struct mlx5_meter_policy_acts acts[RTE_COLORS];
16836        struct mlx5_flow_dv_tag_resource *tag;
16837        struct mlx5_flow_dv_port_id_action_resource *port_action;
16838        struct mlx5_hrxq *hrxq;
16839        struct mlx5_flow_meter_info *next_fm[RTE_COLORS] = {NULL};
16840        struct mlx5_flow_meter_policy *next_policy;
16841        struct mlx5_flow_meter_sub_policy *next_sub_policy;
16842        struct mlx5_flow_tbl_data_entry *tbl_data;
16843        struct rte_flow_error error;
16844        uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16845        uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16846        bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16847        bool match_src_port = false;
16848        int i;
16849
16850        /* If RSS or Queue, no previous actions / rules are created. */
16851        for (i = 0; i < RTE_COLORS; i++) {
16852                acts[i].actions_n = 0;
16853                if (i == RTE_COLOR_RED) {
16854                        /* Only support drop on red. */
16855                        acts[i].dv_actions[0] =
16856                                mtr_policy->dr_drop_action[domain];
16857                        acts[i].actions_n = 1;
16858                        continue;
16859                }
16860                if (mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16861                        struct rte_flow_attr attr = {
16862                                .transfer = transfer
16863                        };
16864
16865                        next_fm[i] = mlx5_flow_meter_find(priv,
16866                                        mtr_policy->act_cnt[i].next_mtr_id,
16867                                        NULL);
16868                        if (!next_fm[i]) {
16869                                DRV_LOG(ERR,
16870                                        "Failed to get next hierarchy meter.");
16871                                goto err_exit;
16872                        }
16873                        if (mlx5_flow_meter_attach(priv, next_fm[i],
16874                                                   &attr, &error)) {
16875                                DRV_LOG(ERR, "%s", error.message);
16876                                next_fm[i] = NULL;
16877                                goto err_exit;
16878                        }
16879                        /* Meter action must be the first for TX. */
16880                        if (mtr_first) {
16881                                acts[i].dv_actions[acts[i].actions_n] =
16882                                        (next_fm[i]->color_aware && i == RTE_COLOR_YELLOW) ?
16883                                                next_fm[i]->meter_action_y :
16884                                                next_fm[i]->meter_action_g;
16885                                acts[i].actions_n++;
16886                        }
16887                }
16888                if (mtr_policy->act_cnt[i].rix_mark) {
16889                        tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16890                                        mtr_policy->act_cnt[i].rix_mark);
16891                        if (!tag) {
16892                                DRV_LOG(ERR, "Failed to find "
16893                                        "mark action for policy.");
16894                                goto err_exit;
16895                        }
16896                        acts[i].dv_actions[acts[i].actions_n] = tag->action;
16897                        acts[i].actions_n++;
16898                }
16899                if (mtr_policy->act_cnt[i].modify_hdr) {
16900                        acts[i].dv_actions[acts[i].actions_n] =
16901                                mtr_policy->act_cnt[i].modify_hdr->action;
16902                        acts[i].actions_n++;
16903                }
16904                if (mtr_policy->act_cnt[i].fate_action) {
16905                        switch (mtr_policy->act_cnt[i].fate_action) {
16906                        case MLX5_FLOW_FATE_PORT_ID:
16907                                port_action = mlx5_ipool_get
16908                                        (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16909                                mtr_policy->act_cnt[i].rix_port_id_action);
16910                                if (!port_action) {
16911                                        DRV_LOG(ERR, "Failed to find "
16912                                                "port action for policy.");
16913                                        goto err_exit;
16914                                }
16915                                acts[i].dv_actions[acts[i].actions_n] =
16916                                        port_action->action;
16917                                acts[i].actions_n++;
16918                                mtr_policy->dev = dev;
16919                                match_src_port = true;
16920                                break;
16921                        case MLX5_FLOW_FATE_DROP:
16922                        case MLX5_FLOW_FATE_JUMP:
16923                                acts[i].dv_actions[acts[i].actions_n] =
16924                                mtr_policy->act_cnt[i].dr_jump_action[domain];
16925                                acts[i].actions_n++;
16926                                break;
16927                        case MLX5_FLOW_FATE_SHARED_RSS:
16928                        case MLX5_FLOW_FATE_QUEUE:
16929                                hrxq = mlx5_ipool_get
16930                                        (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16931                                         sub_policy->rix_hrxq[i]);
16932                                if (!hrxq) {
16933                                        DRV_LOG(ERR, "Failed to find "
16934                                                "queue action for policy.");
16935                                        goto err_exit;
16936                                }
16937                                acts[i].dv_actions[acts[i].actions_n] =
16938                                        hrxq->action;
16939                                acts[i].actions_n++;
16940                                break;
16941                        case MLX5_FLOW_FATE_MTR:
16942                                if (!next_fm[i]) {
16943                                        DRV_LOG(ERR,
16944                                                "No next hierarchy meter.");
16945                                        goto err_exit;
16946                                }
16947                                if (!mtr_first) {
16948                                        acts[i].dv_actions[acts[i].actions_n] =
16949                                                (next_fm[i]->color_aware && i == RTE_COLOR_YELLOW) ?
16950                                                        next_fm[i]->meter_action_y :
16951                                                        next_fm[i]->meter_action_g;
16952                                        acts[i].actions_n++;
16953                                }
16954                                if (mtr_policy->act_cnt[i].next_sub_policy) {
16955                                        next_sub_policy =
16956                                        mtr_policy->act_cnt[i].next_sub_policy;
16957                                } else {
16958                                        next_policy =
16959                                                mlx5_flow_meter_policy_find(dev,
16960                                                                next_fm[i]->policy_id, NULL);
16961                                        MLX5_ASSERT(next_policy);
16962                                        next_sub_policy =
16963                                        next_policy->sub_policys[domain][0];
16964                                }
16965                                tbl_data =
16966                                        container_of(next_sub_policy->tbl_rsc,
16967                                        struct mlx5_flow_tbl_data_entry, tbl);
16968                                acts[i].dv_actions[acts[i].actions_n++] =
16969                                                        tbl_data->jump.action;
16970                                if (mtr_policy->act_cnt[i].modify_hdr)
16971                                        match_src_port = !!transfer;
16972                                break;
16973                        default:
16974                                /* Queue action does nothing. */
16975                                break;
16976                        }
16977                }
16978        }
16979        if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16980                                egress, transfer, match_src_port, acts)) {
16981                DRV_LOG(ERR,
16982                        "Failed to create policy rules per domain.");
16983                goto err_exit;
16984        }
16985        return 0;
16986err_exit:
16987        for (i = 0; i < RTE_COLORS; i++)
16988                if (next_fm[i])
16989                        mlx5_flow_meter_detach(priv, next_fm[i]);
16990        return -1;
16991}
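
/*
 * Illustrative note on the per-color action ordering built above: when
 * "mtr_first" is set (egress, or transfer on a representor), the next
 * hierarchy meter action is placed first in dv_actions[]; otherwise it
 * is appended in the MLX5_FLOW_FATE_MTR case just before the jump to
 * the next policy table. Names abbreviated:
 *
 *   mtr_first:  { meter, [tag], [modify_hdr], jump }
 *   otherwise:  { [tag], [modify_hdr], meter, jump }
 */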
16992
16993/**
16994 * Create the policy rules.
16995 *
16996 * @param[in] dev
16997 *   Pointer to Ethernet device.
16998 * @param[in,out] mtr_policy
16999 *   Pointer to meter policy table.
17000 *
17001 * @return
17002 *   0 on success, -1 otherwise.
17003 */
17004static int
17005flow_dv_create_policy_rules(struct rte_eth_dev *dev,
17006                             struct mlx5_flow_meter_policy *mtr_policy)
17007{
17008        int i;
17009        uint16_t sub_policy_num;
17010
17011        for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
17012                sub_policy_num = (mtr_policy->sub_policy_num >>
17013                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
17014                        MLX5_MTR_SUB_POLICY_NUM_MASK;
17015                if (!sub_policy_num)
17016                        continue;
17017                /* Prepare actions list and create policy rules. */
17018                if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
17019                        mtr_policy->sub_policys[i][0], i)) {
17020                        DRV_LOG(ERR, "Failed to create policy action "
17021                                "list per domain.");
17022                        return -1;
17023                }
17024        }
17025        return 0;
17026}
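
/*
 * Illustrative note on the sub_policy_num packing read above; the
 * constants are assumed for the example. Each domain owns a small
 * bit-field inside the single 32-bit word, so with
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT == 3 and
 * MLX5_MTR_SUB_POLICY_NUM_MASK == 0x7 the per-domain count is
 *
 *   n = (sub_policy_num >> (3 * domain)) & 0x7;
 *
 * e.g. sub_policy_num == 0x51 (binary 001 010 001) encodes one
 * ingress, two egress and one transfer sub-policy.
 */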
17027
17028static int
17029__flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
17030{
17031        struct mlx5_priv *priv = dev->data->dev_private;
17032        struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
17033        struct mlx5_flow_meter_def_policy *def_policy;
17034        struct mlx5_flow_tbl_resource *jump_tbl;
17035        struct mlx5_flow_tbl_data_entry *tbl_data;
17036        uint8_t egress, transfer;
17037        struct rte_flow_error error;
17038        struct mlx5_meter_policy_acts acts[RTE_COLORS];
17039        int ret;
17040
17041        egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
17042        transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
17043        def_policy = mtrmng->def_policy[domain];
17044        if (!def_policy) {
17045                def_policy = mlx5_malloc(MLX5_MEM_ZERO,
17046                        sizeof(struct mlx5_flow_meter_def_policy),
17047                        RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
17048                if (!def_policy) {
17049                        DRV_LOG(ERR, "Failed to alloc default policy table.");
17050                        goto def_policy_error;
17051                }
17052                mtrmng->def_policy[domain] = def_policy;
17053                /* Create the meter suffix table with SUFFIX level. */
17054                jump_tbl = flow_dv_tbl_resource_get(dev,
17055                                MLX5_FLOW_TABLE_LEVEL_METER,
17056                                egress, transfer, false, NULL, 0,
17057                                0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
17058                if (!jump_tbl) {
17059                        DRV_LOG(ERR,
17060                                "Failed to create meter suffix table.");
17061                        goto def_policy_error;
17062                }
17063                def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
17064                tbl_data = container_of(jump_tbl,
17065                                        struct mlx5_flow_tbl_data_entry, tbl);
17066                def_policy->dr_jump_action[RTE_COLOR_GREEN] =
17067                                                tbl_data->jump.action;
17068                acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
17069                acts[RTE_COLOR_GREEN].actions_n = 1;
17070                /*
17071                 * YELLOW has the same default policy as GREEN does.
17072                 * G & Y share the same table and action. Getting the
17073                 * table resource a second time only increments its
17074                 * reference count, which the release stage relies on.
17075                 */
17076                jump_tbl = flow_dv_tbl_resource_get(dev,
17077                                MLX5_FLOW_TABLE_LEVEL_METER,
17078                                egress, transfer, false, NULL, 0,
17079                                0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
17080                if (!jump_tbl) {
17081                        DRV_LOG(ERR,
17082                                "Failed to get meter suffix table.");
17083                        goto def_policy_error;
17084                }
17085                def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
17086                tbl_data = container_of(jump_tbl,
17087                                        struct mlx5_flow_tbl_data_entry, tbl);
17088                def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
17089                                                tbl_data->jump.action;
17090                acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
17091                acts[RTE_COLOR_YELLOW].actions_n = 1;
17092                /* Create jump action to the drop table. */
17093                if (!mtrmng->drop_tbl[domain]) {
17094                        mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
17095                                (dev, MLX5_FLOW_TABLE_LEVEL_METER,
17096                                 egress, transfer, false, NULL, 0,
17097                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
17098                        if (!mtrmng->drop_tbl[domain]) {
17099                                DRV_LOG(ERR, "Failed to create meter "
17100                                        "drop table for default policy.");
17101                                goto def_policy_error;
17102                        }
17103                }
17104                /* All RED: unique drop table for the jump action. */
17105                tbl_data = container_of(mtrmng->drop_tbl[domain],
17106                                        struct mlx5_flow_tbl_data_entry, tbl);
17107                def_policy->dr_jump_action[RTE_COLOR_RED] =
17108                                                tbl_data->jump.action;
17109                acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
17110                acts[RTE_COLOR_RED].actions_n = 1;
17111                /* Create default policy rules. */
17112                ret = __flow_dv_create_domain_policy_rules(dev,
17113                                        &def_policy->sub_policy,
17114                                        egress, transfer, false, acts);
17115                if (ret) {
17116                        DRV_LOG(ERR, "Failed to create default policy rules.");
17117                        goto def_policy_error;
17118                }
17119        }
17120        return 0;
17121def_policy_error:
17122        __flow_dv_destroy_domain_def_policy(dev,
17123                                            (enum mlx5_meter_domain)domain);
17124        return -1;
17125}
17126
17127/**
17128 * Create the default policy table set.
17129 *
17130 * @param[in] dev
17131 *   Pointer to Ethernet device.
17132 * @return
17133 *   0 on success, -1 otherwise.
17134 */
17135static int
17136flow_dv_create_def_policy(struct rte_eth_dev *dev)
17137{
17138        struct mlx5_priv *priv = dev->data->dev_private;
17139        int i;
17140
17141        /* Non-termination policy table. */
17142        for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
17143                if (!priv->sh->config.dv_esw_en &&
17144                    i == MLX5_MTR_DOMAIN_TRANSFER)
17145                        continue;
17146                if (__flow_dv_create_domain_def_policy(dev, i)) {
17147                        DRV_LOG(ERR, "Failed to create default policy");
17148                        /* Rollback the created default policies for others. */
17149                        flow_dv_destroy_def_policy(dev);
17150                        return -1;
17151                }
17152        }
17153        return 0;
17154}
17155
17156/**
17157 * Create the needed meter tables.
17158 * Lock free, (mutex should be acquired by caller).
17159 *
17160 * @param[in] dev
17161 *   Pointer to Ethernet device.
17162 * @param[in] fm
17163 *   Meter information table.
17164 * @param[in] mtr_idx
17165 *   Meter index.
17166 * @param[in] domain_bitmap
17167 *   Domain bitmap.
17168 * @return
17169 *   0 on success, -1 otherwise.
17170 */
17171static int
17172flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
17173                        struct mlx5_flow_meter_info *fm,
17174                        uint32_t mtr_idx,
17175                        uint8_t domain_bitmap)
17176{
17177        struct mlx5_priv *priv = dev->data->dev_private;
17178        struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
17179        struct rte_flow_error error;
17180        struct mlx5_flow_tbl_data_entry *tbl_data;
17181        uint8_t egress, transfer;
17182        void *actions[METER_ACTIONS];
17183        int domain, ret, i;
17184        struct mlx5_flow_counter *cnt;
17185        struct mlx5_flow_dv_match_params value = {
17186                .size = sizeof(value.buf),
17187        };
17188        struct mlx5_flow_dv_match_params matcher_para = {
17189                .size = sizeof(matcher_para.buf),
17190        };
17191        int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
17192                                                     0, &error);
17193        uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
17194        uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
17195        struct mlx5_list_entry *entry;
17196        struct mlx5_flow_dv_matcher matcher = {
17197                .mask = {
17198                        .size = sizeof(matcher.mask.buf),
17199                },
17200        };
17201        struct mlx5_flow_dv_matcher *drop_matcher;
17202        struct mlx5_flow_cb_ctx ctx = {
17203                .error = &error,
17204                .data = &matcher,
17205        };
17206        uint8_t misc_mask;
17207
17208        if (!priv->mtr_en || mtr_id_reg_c < 0) {
17209                rte_errno = ENOTSUP;
17210                return -1;
17211        }
17212        for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
17213                if (!(domain_bitmap & (1 << domain)) ||
17214                        (mtrmng->def_rule[domain] && !fm->drop_cnt))
17215                        continue;
17216                egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
17217                transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
17218                /* Create the drop table with METER DROP level. */
17219                if (!mtrmng->drop_tbl[domain]) {
17220                        mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
17221                                        MLX5_FLOW_TABLE_LEVEL_METER,
17222                                        egress, transfer, false, NULL, 0,
17223                                        0, MLX5_MTR_TABLE_ID_DROP, &error);
17224                        if (!mtrmng->drop_tbl[domain]) {
17225                                DRV_LOG(ERR, "Failed to create meter drop table.");
17226                                goto policy_error;
17227                        }
17228                }
17229                /* Create default matcher in drop table. */
17230                matcher.tbl = mtrmng->drop_tbl[domain];
17231                tbl_data = container_of(mtrmng->drop_tbl[domain],
17232                                struct mlx5_flow_tbl_data_entry, tbl);
17233                if (!mtrmng->def_matcher[domain]) {
17234                        flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
17235                                       (enum modify_reg)mtr_id_reg_c,
17236                                       0, 0);
17237                        matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
17238                        matcher.crc = rte_raw_cksum
17239                                        ((const void *)matcher.mask.buf,
17240                                        matcher.mask.size);
17241                        entry = mlx5_list_register(tbl_data->matchers, &ctx);
17242                        if (!entry) {
17243                                DRV_LOG(ERR, "Failed to register meter "
17244                                        "drop default matcher.");
17245                                goto policy_error;
17246                        }
17247                        mtrmng->def_matcher[domain] = container_of(entry,
17248                                        struct mlx5_flow_dv_matcher, entry);
17249                }
17250                /* Create default rule in drop table. */
17251                if (!mtrmng->def_rule[domain]) {
17252                        i = 0;
17253                        actions[i++] = priv->sh->dr_drop_action;
17254                        flow_dv_match_meta_reg(matcher_para.buf, value.buf,
17255                                (enum modify_reg)mtr_id_reg_c, 0, 0);
17256                        misc_mask = flow_dv_matcher_enable(value.buf);
17257                        __flow_dv_adjust_buf_size(&value.size, misc_mask);
17258                        ret = mlx5_flow_os_create_flow
17259                                (mtrmng->def_matcher[domain]->matcher_object,
17260                                (void *)&value, i, actions,
17261                                &mtrmng->def_rule[domain]);
17262                        if (ret) {
17263                                DRV_LOG(ERR, "Failed to create meter "
17264                                "default drop rule for drop table.");
17265                                goto policy_error;
17266                        }
17267                }
17268                if (!fm->drop_cnt)
17269                        continue;
17270                MLX5_ASSERT(mtrmng->max_mtr_bits);
17271                if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
17272                        /* Create matchers for Drop. */
17273                        flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
17274                                        (enum modify_reg)mtr_id_reg_c, 0,
17275                                        (mtr_id_mask << mtr_id_offset));
17276                        matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
17277                        matcher.crc = rte_raw_cksum
17278                                        ((const void *)matcher.mask.buf,
17279                                        matcher.mask.size);
17280                        entry = mlx5_list_register(tbl_data->matchers, &ctx);
17281                        if (!entry) {
17282                                DRV_LOG(ERR,
17283                                        "Failed to register meter drop matcher.");
17284                                goto policy_error;
17285                        }
17286                        mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
17287                                container_of(entry, struct mlx5_flow_dv_matcher,
17288                                             entry);
17289                }
17290                drop_matcher =
17291                        mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
17292                /* Create drop rule, matching meter_id only. */
17293                flow_dv_match_meta_reg(matcher_para.buf, value.buf,
17294                                (enum modify_reg)mtr_id_reg_c,
17295                                (mtr_idx << mtr_id_offset), UINT32_MAX);
17296                i = 0;
17297                cnt = flow_dv_counter_get_by_idx(dev,
17298                                        fm->drop_cnt, NULL);
17299                actions[i++] = cnt->action;
17300                actions[i++] = priv->sh->dr_drop_action;
17301                misc_mask = flow_dv_matcher_enable(value.buf);
17302                __flow_dv_adjust_buf_size(&value.size, misc_mask);
17303                ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
17304                                               (void *)&value, i, actions,
17305                                               &fm->drop_rule[domain]);
17306                if (ret) {
17307                        DRV_LOG(ERR, "Failed to create meter "
17308                                "drop rule for drop table.");
17309                        goto policy_error;
17310                }
17311        }
17312        return 0;
17313policy_error:
17314        for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
17315                if (fm->drop_rule[i]) {
17316                        claim_zero(mlx5_flow_os_destroy_flow
17317                                (fm->drop_rule[i]));
17318                        fm->drop_rule[i] = NULL;
17319                }
17320        }
17321        return -1;
17322}
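
/*
 * Illustrative note on the meter ID match above; the register layout
 * is assumed for the example. With register sharing enabled
 * (priv->mtr_reg_share) the low MLX5_MTR_COLOR_BITS of the register
 * carry the packet color and the meter ID sits above them. Taking 3
 * color bits and max_mtr_bits == 8:
 *
 *   mtr_id_mask   = (1u << 8) - 1;     // 0xff
 *   mtr_id_offset = 3;
 *   matcher mask  = 0xff << 3;
 *   rule value    = mtr_idx << 3;
 *
 * so the drop rule matches its meter ID independently of the color.
 */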
17323
17324static struct mlx5_flow_meter_sub_policy *
17325__flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
17326                struct mlx5_flow_meter_policy *mtr_policy,
17327                struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
17328                struct mlx5_flow_meter_sub_policy *next_sub_policy,
17329                bool *is_reuse)
17330{
17331        struct mlx5_priv *priv = dev->data->dev_private;
17332        struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17333        uint32_t sub_policy_idx = 0;
17334        uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
17335        uint32_t i, j;
17336        struct mlx5_hrxq *hrxq;
17337        struct mlx5_flow_handle dh;
17338        struct mlx5_meter_policy_action_container *act_cnt;
17339        uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17340        uint16_t sub_policy_num;
17341        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
17342
17343        MLX5_ASSERT(wks);
17344        rte_spinlock_lock(&mtr_policy->sl);
17345        for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17346                if (!rss_desc[i])
17347                        continue;
17348                hrxq = mlx5_hrxq_get(dev, rss_desc[i]);
17349                if (!hrxq) {
17350                        rte_spinlock_unlock(&mtr_policy->sl);
17351                        return NULL;
17352                }
17353                hrxq_idx[i] = hrxq->idx;
17354        }
17355        sub_policy_num = (mtr_policy->sub_policy_num >>
17356                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17357                        MLX5_MTR_SUB_POLICY_NUM_MASK;
17358        for (j = 0; j < sub_policy_num; j++) {
17359                for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17360                        if (rss_desc[i] &&
17361                            hrxq_idx[i] !=
17362                            mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
17363                                break;
17364                }
17365                if (i >= MLX5_MTR_RTE_COLORS) {
17366                        /*
17367                         * Found the sub policy table with
17368                         * the same queue per color.
17369                         */
17370                        rte_spinlock_unlock(&mtr_policy->sl);
17371                        for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
17372                                mlx5_hrxq_release(dev, hrxq_idx[i]);
17373                        *is_reuse = true;
17374                        return mtr_policy->sub_policys[domain][j];
17375                }
17376        }
17377        /* Create sub policy. */
17378        if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_GREEN] &&
17379            !mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_YELLOW]) {
17380                /* Reuse the first pre-allocated sub_policy. */
17381                sub_policy = mtr_policy->sub_policys[domain][0];
17382                sub_policy_idx = sub_policy->idx;
17383        } else {
17384                sub_policy = mlx5_ipool_zmalloc
17385                                (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17386                                 &sub_policy_idx);
17387                if (!sub_policy ||
17388                    sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
17389                        for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
17390                                mlx5_hrxq_release(dev, hrxq_idx[i]);
17391                        goto rss_sub_policy_error;
17392                }
17393                sub_policy->idx = sub_policy_idx;
17394                sub_policy->main_policy = mtr_policy;
17395        }
17396        for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17397                if (!rss_desc[i])
17398                        continue;
17399                sub_policy->rix_hrxq[i] = hrxq_idx[i];
17400                if (mtr_policy->is_hierarchy) {
17401                        act_cnt = &mtr_policy->act_cnt[i];
17402                        act_cnt->next_sub_policy = next_sub_policy;
17403                        mlx5_hrxq_release(dev, hrxq_idx[i]);
17404                } else {
17405                        /*
17406                         * Overwrite the last action from
17407                         * RSS action to Queue action.
17408                         */
17409                        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
17410                                              hrxq_idx[i]);
17411                        if (!hrxq) {
17412                                DRV_LOG(ERR, "Failed to get policy hrxq");
17413                                goto rss_sub_policy_error;
17414                        }
17415                        act_cnt = &mtr_policy->act_cnt[i];
17416                        if (act_cnt->rix_mark || act_cnt->modify_hdr) {
17417                                memset(&dh, 0, sizeof(struct mlx5_flow_handle));
17418                                if (act_cnt->rix_mark)
17419                                        wks->mark = 1;
17420                                dh.fate_action = MLX5_FLOW_FATE_QUEUE;
17421                                dh.rix_hrxq = hrxq_idx[i];
17422                                flow_drv_rxq_flags_set(dev, &dh);
17423                        }
17424                }
17425        }
17426        if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
17427                                               sub_policy, domain)) {
17428                DRV_LOG(ERR, "Failed to create policy "
17429                        "rules for ingress domain.");
17430                goto rss_sub_policy_error;
17431        }
17432        if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17433                i = (mtr_policy->sub_policy_num >>
17434                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17435                        MLX5_MTR_SUB_POLICY_NUM_MASK;
17436                if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
17437                        DRV_LOG(ERR, "No free sub-policy slot.");
17438                        goto rss_sub_policy_error;
17439                }
17440                mtr_policy->sub_policys[domain][i] = sub_policy;
17441                i++;
17442                mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17443                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17444                mtr_policy->sub_policy_num |=
17445                        (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17446                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17447        }
17448        rte_spinlock_unlock(&mtr_policy->sl);
17449        *is_reuse = false;
17450        return sub_policy;
17451rss_sub_policy_error:
17452        if (sub_policy) {
17453                __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17454                if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17455                        i = (mtr_policy->sub_policy_num >>
17456                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17457                        MLX5_MTR_SUB_POLICY_NUM_MASK;
17458                        mtr_policy->sub_policys[domain][i] = NULL;
17459                        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17460                                        sub_policy->idx);
17461                }
17462        }
17463        rte_spinlock_unlock(&mtr_policy->sl);
17464        return NULL;
17465}
17466
17467/**
17468 * Find the policy table for prefix table with RSS.
17469 *
17470 * @param[in] dev
17471 *   Pointer to Ethernet device.
17472 * @param[in] mtr_policy
17473 *   Pointer to meter policy table.
17474 * @param[in] rss_desc
17475 *   Pointer to rss_desc
17476 * @return
17477 *   Pointer to table set on success, NULL otherwise and rte_errno is set.
17478 */
17479static struct mlx5_flow_meter_sub_policy *
17480flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
17481                struct mlx5_flow_meter_policy *mtr_policy,
17482                struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
17483{
17484        struct mlx5_priv *priv = dev->data->dev_private;
17485        struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17486        struct mlx5_flow_meter_info *next_fm;
17487        struct mlx5_flow_meter_policy *next_policy;
17488        struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
17489        struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
17490        struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
17491        uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17492        bool reuse_sub_policy;
17493        uint32_t i = 0;
17494        uint32_t j = 0;
17495
17496        while (true) {
17497                /* Iterate hierarchy to get all policies in this hierarchy. */
17498                policies[i++] = mtr_policy;
17499                if (!mtr_policy->is_hierarchy)
17500                        break;
17501                if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
17502                        DRV_LOG(ERR, "Exceeded max meter number in hierarchy.");
17503                        return NULL;
17504                }
17505                rte_spinlock_lock(&mtr_policy->sl);
17506                next_fm = mlx5_flow_meter_hierarchy_next_meter(priv, mtr_policy, NULL);
17507                rte_spinlock_unlock(&mtr_policy->sl);
17508                if (!next_fm) {
17509                        DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
17510                        return NULL;
17511                }
17512                next_policy =
17513                        mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
17514                                                    NULL);
17515                MLX5_ASSERT(next_policy);
17516                mtr_policy = next_policy;
17517        }
17518        while (i) {
17519                /*
17520                 * From last policy to the first one in hierarchy,
17521                 * create / get the sub policy for each of them.
17522                 */
17523                sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
17524                                                        policies[--i],
17525                                                        rss_desc,
17526                                                        next_sub_policy,
17527                                                        &reuse_sub_policy);
17528                if (!sub_policy) {
17529                        DRV_LOG(ERR, "Failed to get the sub policy.");
17530                        goto err_exit;
17531                }
17532                if (!reuse_sub_policy)
17533                        sub_policies[j++] = sub_policy;
17534                next_sub_policy = sub_policy;
17535        }
17536        return sub_policy;
17537err_exit:
17538        while (j) {
17539                uint16_t sub_policy_num;
17540
17541                sub_policy = sub_policies[--j];
17542                mtr_policy = sub_policy->main_policy;
17543                __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17544                if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17545                        sub_policy_num = (mtr_policy->sub_policy_num >>
17546                                (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17547                                MLX5_MTR_SUB_POLICY_NUM_MASK;
17548                        mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17549                                                                        NULL;
17550                        sub_policy_num--;
17551                        mtr_policy->sub_policy_num &=
17552                                ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17553                                  (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17554                        mtr_policy->sub_policy_num |=
17555                        (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17556                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17557                        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17558                                        sub_policy->idx);
17559                }
17560        }
17561        return NULL;
17562}
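
/*
 * Illustrative note: for a meter hierarchy M0 -> M1 -> M2 the first
 * loop above records the policies top-down (policies[] = {P0, P1, P2})
 * and the second loop creates or reuses the sub-policies bottom-up
 * (P2, then P1, then P0), so each level can resolve the jump target in
 * the already-prepared next sub-policy.
 */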
17563
17564/**
17565 * Check whether the hierarchy tag rule needs to be created.
17566 *
17567 * @param[in] priv
17568 *   Pointer to mlx5_priv.
17569 * @param[in] mtr_policy
17570 *   Pointer to current meter policy.
17571 * @param[in] src_port
17572 *   The src port this extra rule should use.
17573 * @param[out] next_fm
17574 *   Pointer to next meter in hierarchy.
17575 * @param[out] skip
17576 *   Indicates whether to skip the tag rule creation.
17577 * @param[out] error
17578 *   Perform verbose error reporting if not NULL.
17579 * @return
17580 *   0 on success, a negative errno value otherwise and rte_errno is set.
17581 */
17582static int
17583mlx5_meter_hierarchy_skip_tag_rule(struct mlx5_priv *priv,
17584                                   struct mlx5_flow_meter_policy *mtr_policy,
17585                                   int32_t src_port,
17586                                   struct mlx5_flow_meter_info **next_fm,
17587                                   bool *skip,
17588                                   struct rte_flow_error *error)
17589{
17590        struct mlx5_flow_meter_sub_policy *sub_policy;
17591        struct mlx5_sub_policy_color_rule *color_rule;
17592        uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17593        int ret = 0;
17594        int i;
17595
17596        *next_fm = NULL;
17597        *skip = false;
17598        rte_spinlock_lock(&mtr_policy->sl);
17599        if (!mtr_policy->is_hierarchy)
17600                goto exit;
17601        *next_fm = mlx5_flow_meter_hierarchy_next_meter(priv, mtr_policy, NULL);
17602        if (!*next_fm) {
17603                ret = rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
17604                                         NULL, "Failed to find next meter in hierarchy.");
17605                goto exit;
17606        }
17607        if (!(*next_fm)->drop_cnt) {
17608                *skip = true;
17609                goto exit;
17610        }
17611        sub_policy = mtr_policy->sub_policys[domain][0];
17612        for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17613                if (mtr_policy->act_cnt[i].fate_action != MLX5_FLOW_FATE_MTR)
17614                        continue;
17615                TAILQ_FOREACH(color_rule, &sub_policy->color_rules[i], next_port)
17616                        if (color_rule->src_port == src_port) {
17617                                *skip = true;
17618                                goto exit;
17619                        }
17620        }
17621exit:
17622        rte_spinlock_unlock(&mtr_policy->sl);
17623        return ret;
17624}
17625
17626/**
17627 * Create the sub policy tag rule for all meters in hierarchy.
17628 *
17629 * @param[in] dev
17630 *   Pointer to Ethernet device.
17631 * @param[in] fm
17632 *   Meter information table.
17633 * @param[in] src_port
17634 *   The src port this extra rule should use.
17635 * @param[in] item
17636 *   The src port match item.
17637 * @param[out] error
17638 *   Perform verbose error reporting if not NULL.
17639 * @return
17640 *   0 on success, a negative errno value otherwise and rte_errno is set.
17641 */
17642static int
17643flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
17644                                struct mlx5_flow_meter_info *fm,
17645                                int32_t src_port,
17646                                const struct rte_flow_item *item,
17647                                struct rte_flow_error *error)
17648{
17649        struct mlx5_priv *priv = dev->data->dev_private;
17650        struct mlx5_flow_meter_policy *mtr_policy;
17651        struct mlx5_flow_meter_sub_policy *sub_policy;
17652        struct mlx5_flow_meter_info *next_fm = NULL;
17653        struct mlx5_flow_meter_policy *next_policy;
17654        struct mlx5_flow_meter_sub_policy *next_sub_policy;
17655        struct mlx5_flow_tbl_data_entry *tbl_data;
17656        struct mlx5_sub_policy_color_rule *color_rule;
17657        struct mlx5_meter_policy_acts acts;
17658        uint32_t color_reg_c_idx;
17659        bool mtr_first = (src_port != UINT16_MAX);
17660        struct rte_flow_attr attr = {
17661                .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
17662                .priority = 0,
17663                .ingress = 0,
17664                .egress = 0,
17665                .transfer = 1,
17666                .reserved = 0,
17667        };
17668        uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17669        struct {
17670                struct mlx5_flow_meter_policy *fm_policy;
17671                struct mlx5_flow_meter_info *next_fm;
17672                struct mlx5_sub_policy_color_rule *tag_rule[MLX5_MTR_RTE_COLORS];
17673        } fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} };
17674        uint32_t fm_cnt = 0;
17675        uint32_t i, j;
17676
17677        color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
17678        /* Get all FMs that need the tag color rule created. */
17679        do {
17680                bool skip = false;
17681
17682                mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17683                MLX5_ASSERT(mtr_policy);
17684                if (mlx5_meter_hierarchy_skip_tag_rule(priv, mtr_policy, src_port,
17685                                                       &next_fm, &skip, error))
17686                        goto err_exit;
17687                if (next_fm && !skip) {
17688                        fm_info[fm_cnt].fm_policy = mtr_policy;
17689                        fm_info[fm_cnt].next_fm = next_fm;
17690                        if (++fm_cnt >= MLX5_MTR_CHAIN_MAX_NUM) {
17691                                rte_flow_error_set(error, errno,
17692                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17693                                        "Exceed max meter number in hierarchy.");
17694                                goto err_exit;
17695                        }
17696                }
17697                fm = next_fm;
17698        } while (fm);
17699        /* Create tag color rules for all the needed FMs. */
17700        for (i = 0; i < fm_cnt; i++) {
17701                void *mtr_action;
17702
17703                mtr_policy = fm_info[i].fm_policy;
17704                rte_spinlock_lock(&mtr_policy->sl);
17705                sub_policy = mtr_policy->sub_policys[domain][0];
17706                for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
17707                        if (mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_MTR)
17708                                continue;
17709                        color_rule = mlx5_malloc(MLX5_MEM_ZERO,
17710                                                 sizeof(struct mlx5_sub_policy_color_rule),
17711                                                 0, SOCKET_ID_ANY);
17712                        if (!color_rule) {
17713                                rte_spinlock_unlock(&mtr_policy->sl);
17714                                rte_flow_error_set(error, ENOMEM,
17715                                                   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
17716                                                   "No memory to create tag color rule.");
17717                                goto err_exit;
17718                        }
17719                        color_rule->src_port = src_port;
17720                        next_fm = fm_info[i].next_fm;
17721                        if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
17722                                mlx5_free(color_rule);
17723                                rte_spinlock_unlock(&mtr_policy->sl);
17724                                goto err_exit;
17725                        }
17726                        fm_info[i].tag_rule[j] = color_rule;
17727                        TAILQ_INSERT_TAIL(&sub_policy->color_rules[j], color_rule, next_port);
17728                        /* Prepare to create color rule. */
17729                        mtr_action = (next_fm->color_aware && j == RTE_COLOR_YELLOW) ?
17730                                                                next_fm->meter_action_y :
17731                                                                next_fm->meter_action_g;
17732                        next_policy = mlx5_flow_meter_policy_find(dev, next_fm->policy_id, NULL);
17733                        MLX5_ASSERT(next_policy);
17734                        next_sub_policy = next_policy->sub_policys[domain][0];
17735                        tbl_data = container_of(next_sub_policy->tbl_rsc,
17736                                                struct mlx5_flow_tbl_data_entry, tbl);
17737                        if (mtr_first) {
17738                                acts.dv_actions[0] = mtr_action;
17739                                acts.dv_actions[1] = mtr_policy->act_cnt[j].modify_hdr->action;
17740                        } else {
17741                                acts.dv_actions[0] = mtr_policy->act_cnt[j].modify_hdr->action;
17742                                acts.dv_actions[1] = mtr_action;
17743                        }
17744                        acts.dv_actions[2] = tbl_data->jump.action;
17745                        acts.actions_n = 3;
17746                        if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
17747                                                MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
17748                                                &attr, true, item, &color_rule->matcher, error)) {
17749                                rte_spinlock_unlock(&mtr_policy->sl);
17750                                rte_flow_error_set(error, errno,
17751                                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17752                                                   "Failed to create hierarchy meter matcher.");
17753                                goto err_exit;
17754                        }
17755                        if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)j,
17756                                                color_rule->matcher->matcher_object,
17757                                                acts.actions_n, acts.dv_actions,
17758                                                true, item, &color_rule->rule, &attr)) {
17759                                rte_spinlock_unlock(&mtr_policy->sl);
17760                                rte_flow_error_set(error, errno,
17761                                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17762                                                   "Failed to create hierarchy meter rule.");
17763                                goto err_exit;
17764                        }
17765                }
17766                rte_spinlock_unlock(&mtr_policy->sl);
17767        }
17768        return 0;
17769err_exit:
17770        for (i = 0; i < fm_cnt; i++) {
17771                mtr_policy = fm_info[i].fm_policy;
17772                rte_spinlock_lock(&mtr_policy->sl);
17773                sub_policy = mtr_policy->sub_policys[domain][0];
17774                for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
17775                        color_rule = fm_info[i].tag_rule[j];
17776                        if (!color_rule)
17777                                continue;
17778                        if (color_rule->rule)
17779                                mlx5_flow_os_destroy_flow(color_rule->rule);
17780                        if (color_rule->matcher) {
17781                                struct mlx5_flow_tbl_data_entry *tbl =
17782                                        container_of(color_rule->matcher->tbl, typeof(*tbl), tbl);
17783                                mlx5_list_unregister(tbl->matchers, &color_rule->matcher->entry);
17784                        }
17785                        if (fm_info[i].next_fm)
17786                                mlx5_flow_meter_detach(priv, fm_info[i].next_fm);
17787                        TAILQ_REMOVE(&sub_policy->color_rules[j], color_rule, next_port);
17788                        mlx5_free(color_rule);
17789                }
17790                rte_spinlock_unlock(&mtr_policy->sl);
17791        }
17792        return -rte_errno;
17793}
17794
17795/**
17796 * Destroy the sub policies that use RX queues (QUEUE or SHARED RSS fate).
17797 *
17798 * @param[in] dev
17799 *   Pointer to Ethernet device.
17800 * @param[in] mtr_policy
17801 *   Pointer to meter policy table.
17802 */
17803static void
17804flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17805                                    struct mlx5_flow_meter_policy *mtr_policy)
17806{
17807        struct mlx5_priv *priv = dev->data->dev_private;
17808        struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17809        uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17810        uint32_t i, j;
17811        uint16_t sub_policy_num, new_policy_num;
17812
17813        rte_spinlock_lock(&mtr_policy->sl);
17814        for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17815                switch (mtr_policy->act_cnt[i].fate_action) {
17816                case MLX5_FLOW_FATE_SHARED_RSS:
17817                        sub_policy_num = (mtr_policy->sub_policy_num >>
17818                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17819                        MLX5_MTR_SUB_POLICY_NUM_MASK;
17820                        new_policy_num = sub_policy_num;
17821                        for (j = 0; j < sub_policy_num; j++) {
17822                                sub_policy =
17823                                        mtr_policy->sub_policys[domain][j];
17824                                if (sub_policy) {
17825                                        __flow_dv_destroy_sub_policy_rules(dev,
17826                                                sub_policy);
17827                                        if (sub_policy !=
17828                                            mtr_policy->sub_policys[domain][0]) {
17829                                                mtr_policy->sub_policys[domain][j] =
17830                                                        NULL;
17831                                                mlx5_ipool_free(priv->sh->ipool
17832                                                        [MLX5_IPOOL_MTR_POLICY],
17833                                                        sub_policy->idx);
17834                                                new_policy_num--;
17835                                        }
17836                                }
17837                        }
17838                        if (new_policy_num != sub_policy_num) {
17839                                mtr_policy->sub_policy_num &=
17840                                ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17841                                (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17842                                mtr_policy->sub_policy_num |=
17843                                (new_policy_num &
17844                                        MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17845                                (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17846                        }
17847                        break;
17848                case MLX5_FLOW_FATE_QUEUE:
17849                        sub_policy = mtr_policy->sub_policys[domain][0];
17850                        __flow_dv_destroy_sub_policy_rules(dev,
17851                                                           sub_policy);
17852                        break;
17853                default:
17854                        /* Other fate actions do not use a queue, nothing to do. */
17855                        break;
17856                }
17857        }
17858        rte_spinlock_unlock(&mtr_policy->sl);
17859}
17860/**
17861 * Check whether the DR drop action is supported on the root table or not.
17862 *
17863 * Create a simple flow with DR drop action on root table to validate
17864 * if DR drop action on root table is supported or not.
17865 *
17866 * @param[in] dev
17867 *   Pointer to rte_eth_dev structure.
17868 *
17869 * @return
17870 *   0 on success, a negative errno value otherwise and rte_errno is set.
17871 */
17872int
17873mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
17874{
17875        struct mlx5_priv *priv = dev->data->dev_private;
17876        struct mlx5_dev_ctx_shared *sh = priv->sh;
17877        struct mlx5_flow_dv_match_params mask = {
17878                .size = sizeof(mask.buf),
17879        };
17880        struct mlx5_flow_dv_match_params value = {
17881                .size = sizeof(value.buf),
17882        };
17883        struct mlx5dv_flow_matcher_attr dv_attr = {
17884                .type = IBV_FLOW_ATTR_NORMAL,
17885                .priority = 0,
17886                .match_criteria_enable = 0,
17887                .match_mask = (void *)&mask,
17888        };
17889        struct mlx5_flow_tbl_resource *tbl = NULL;
17890        void *matcher = NULL;
17891        void *flow = NULL;
17892        int ret = -1;
17893
17894        tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
17895                                        0, 0, 0, NULL);
17896        if (!tbl)
17897                goto err;
17898        dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17899        __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17900        ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17901                                               tbl->obj, &matcher);
17902        if (ret)
17903                goto err;
17904        __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17905        ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17906                                       &sh->dr_drop_action, &flow);
17907err:
17908        /*
17909         * If the DR drop action is not supported on the root table, flow
17910         * creation fails with EOPNOTSUPP or EPROTONOSUPPORT.
17911         */
17912        if (!flow) {
17913                if (matcher &&
17914                    (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
17915                        DRV_LOG(INFO, "DR drop action is not supported in root table.");
17916                else
17917                        DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
17918                ret = -1;
17919        } else {
17920                claim_zero(mlx5_flow_os_destroy_flow(flow));
17921        }
17922        if (matcher)
17923                claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17924        if (tbl)
17925                flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17926        return ret;
17927}
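
/*
 * A minimal usage sketch of the probe above; the wrapper name is
 * hypothetical and the real driver consumes the result elsewhere. The
 * probe returns 0 only when the flow with the DR drop action could be
 * created on the root table.
 */
static inline bool
root_table_dr_drop_supported(struct rte_eth_dev *dev)
{
	return mlx5_flow_discover_dr_action_support(dev) == 0;
}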
17928
17929/**
17930 * Validate the batch counter support in root table.
17931 *
17932 * Create a simple flow with an invalid counter offset on the root table to
17933 * validate whether a batch counter with offset is supported there or not.
17934 *
17935 * @param[in] dev
17936 *   Pointer to rte_eth_dev structure.
17937 *
17938 * @return
17939 *   0 on success, a negative errno value otherwise and rte_errno is set.
17940 */
17941int
17942mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17943{
17944        struct mlx5_priv *priv = dev->data->dev_private;
17945        struct mlx5_dev_ctx_shared *sh = priv->sh;
17946        struct mlx5_flow_dv_match_params mask = {
17947                .size = sizeof(mask.buf),
17948        };
17949        struct mlx5_flow_dv_match_params value = {
17950                .size = sizeof(value.buf),
17951        };
17952        struct mlx5dv_flow_matcher_attr dv_attr = {
17953                .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17954                .priority = 0,
17955                .match_criteria_enable = 0,
17956                .match_mask = (void *)&mask,
17957        };
17958        void *actions[2] = { 0 };
17959        struct mlx5_flow_tbl_resource *tbl = NULL;
17960        struct mlx5_devx_obj *dcs = NULL;
17961        void *matcher = NULL;
17962        void *flow = NULL;
17963        int ret = -1;
17964
17965        tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17966                                        0, 0, 0, NULL);
17967        if (!tbl)
17968                goto err;
17969        dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17970        if (!dcs)
17971                goto err;
17972        ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17973                                                    &actions[0]);
17974        if (ret)
17975                goto err;
17976        dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17977        __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17978        ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17979                                               tbl->obj, &matcher);
17980        if (ret)
17981                goto err;
17982        __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17983        ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17984                                       actions, &flow);
17985err:
17986        /*
17987         * If a batch counter with offset is not supported, the driver does not
17988         * validate the invalid offset value and flow creation should succeed.
17989         * In this case, batch counters are not supported in the root table.
17990         *
17991         * Otherwise, if flow creation fails, the counter offset is supported.
17992         */
17993        if (flow) {
17994                DRV_LOG(INFO, "Batch counter is not supported in root "
17995                              "table. Switch to fallback mode.");
17996                rte_errno = ENOTSUP;
17997                ret = -rte_errno;
17998                claim_zero(mlx5_flow_os_destroy_flow(flow));
17999        } else {
18000                /* Check the matcher to make sure validation failed at flow creation. */
18001                if (!matcher || errno != EINVAL)
18002                        DRV_LOG(ERR, "Unexpected error in counter offset "
18003                                     "support detection");
18004                ret = 0;
18005        }
18006        if (actions[0])
18007                claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
18008        if (matcher)
18009                claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
18010        if (tbl)
18011                flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
18012        if (dcs)
18013                claim_zero(mlx5_devx_cmd_destroy(dcs));
18014        return ret;
18015}
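
/*
 * A minimal usage sketch of the inverted probe above; the wrapper name
 * is hypothetical. A zero return means counter offsets work on the
 * root table, otherwise the counter pool must run in fallback mode.
 */
static inline bool
counter_fallback_required(struct rte_eth_dev *dev)
{
	return mlx5_flow_dv_discover_counter_offset_support(dev) != 0;
}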
18016
18017/**
18018 * Query a devx counter.
18019 *
18020 * @param[in] dev
18021 *   Pointer to the Ethernet device structure.
18022 * @param[in] counter
18023 *   Index to the flow counter.
18024 * @param[in] clear
18025 *   Set to clear the counter statistics.
18026 * @param[out] pkts
18027 *   The statistics value of packets.
18028 * @param[out] bytes
18029 *   The statistics value of bytes.
 * @param[out] action
 *   If not NULL, set to the counter's related action when the counter
 *   exists.
18030 *
18031 * @return
18032 *   0 on success, otherwise return -1.
18033 */
18034static int
18035flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
18036                      uint64_t *pkts, uint64_t *bytes, void **action)
18037{
18038        struct mlx5_priv *priv = dev->data->dev_private;
18039        struct mlx5_flow_counter *cnt;
18040        uint64_t inn_pkts, inn_bytes;
18041        int ret;
18042
18043        if (!priv->sh->cdev->config.devx)
18044                return -1;
18045
18046        ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
18047        if (ret)
18048                return -1;
18049        cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
18050        if (cnt && action)
18051                *action = cnt->action;
18052
18053        *pkts = inn_pkts - cnt->hits;
18054        *bytes = inn_bytes - cnt->bytes;
18055        if (clear) {
18056                cnt->hits = inn_pkts;
18057                cnt->bytes = inn_bytes;
18058        }
18059        return 0;
18060}
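
/*
 * An application-side sketch of the clear-on-read behavior above: with
 * rte_flow_query() and reset set, the PMD keeps the raw readout in
 * cnt->hits/cnt->bytes and reports only the delta on the next query.
 * `flow` and `count_action` are assumed to come from an earlier
 * rte_flow_create().
 */
static inline int
query_and_reset_counter(uint16_t port_id, struct rte_flow *flow,
			const struct rte_flow_action *count_action,
			uint64_t *pkts, uint64_t *bytes)
{
	struct rte_flow_query_count query = { .reset = 1 };
	struct rte_flow_error error;

	if (rte_flow_query(port_id, flow, count_action, &query, &error))
		return -1;
	*pkts = query.hits;
	*bytes = query.bytes;
	return 0;
}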
18061
18062/**
18063 * Get aged-out flows.
18064 *
18065 * @param[in] dev
18066 *   Pointer to the Ethernet device structure.
18067 * @param[in] context
18068 *   The address of an array of pointers to the aged-out flows contexts.
18069 * @param[in] nb_contexts
18070 *   The length of context array pointers.
18071 * @param[out] error
18072 *   Perform verbose error reporting if not NULL. Initialized in case of
18073 *   error only.
18074 *
18075 * @return
18076 *   the number of aged-out flow contexts on success, otherwise a negative
18077 *   errno value. If nb_contexts is 0, the total number of aged-out
18078 *   contexts is returned; otherwise, the number of aged-out flows
18079 *   reported in the context array.
18081 */
18082static int
18083flow_dv_get_aged_flows(struct rte_eth_dev *dev,
18084                    void **context,
18085                    uint32_t nb_contexts,
18086                    struct rte_flow_error *error)
18087{
18088        struct mlx5_priv *priv = dev->data->dev_private;
18089        struct mlx5_age_info *age_info;
18090        struct mlx5_age_param *age_param;
18091        struct mlx5_flow_counter *counter;
18092        struct mlx5_aso_age_action *act;
18093        int nb_flows = 0;
18094
18095        if (nb_contexts && !context)
18096                return rte_flow_error_set(error, EINVAL,
18097                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
18098                                          NULL, "empty context");
18099        age_info = GET_PORT_AGE_INFO(priv);
18100        rte_spinlock_lock(&age_info->aged_sl);
18101        LIST_FOREACH(act, &age_info->aged_aso, next) {
18102                nb_flows++;
18103                if (nb_contexts) {
18104                        context[nb_flows - 1] =
18105                                act->age_params.context;
18106                        if (!(--nb_contexts))
18107                                break;
18108                }
18109        }
18110        TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
18111                nb_flows++;
18112                if (nb_contexts) {
18113                        age_param = MLX5_CNT_TO_AGE(counter);
18114                        context[nb_flows - 1] = age_param->context;
18115                        if (!(--nb_contexts))
18116                                break;
18117                }
18118        }
18119        rte_spinlock_unlock(&age_info->aged_sl);
18120        MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
18121        return nb_flows;
18122}
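
/*
 * An application-side sketch of the two-phase contract documented
 * above: rte_flow_get_aged_flows() with nb_contexts == 0 returns the
 * number of pending aged-out flows, a second call collects the user
 * contexts registered in the AGE actions. The batch size is arbitrary.
 */
static inline void
drain_aged_flows(uint16_t port_id)
{
	void *ctx[64]; /* Arbitrary batch size for the sketch. */
	struct rte_flow_error error;
	int n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);

	if (n <= 0)
		return;
	if (n > (int)RTE_DIM(ctx))
		n = (int)RTE_DIM(ctx);
	n = rte_flow_get_aged_flows(port_id, ctx, n, &error);
	/* Each ctx[i] is the context from an rte_flow_action_age conf. */
}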
18123
18124/*
18125 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
18126 */
18127static uint32_t
18128flow_dv_counter_allocate(struct rte_eth_dev *dev)
18129{
18130        return flow_dv_counter_alloc(dev, 0);
18131}
18132
18133/**
18134 * Validate indirect action.
18135 * Dispatcher for action type specific validation.
18136 *
18137 * @param[in] dev
18138 *   Pointer to the Ethernet device structure.
18139 * @param[in] conf
18140 *   Indirect action configuration.
18141 * @param[in] action
18142 *   The indirect action object to validate.
18143 * @param[out] error
18144 *   Perform verbose error reporting if not NULL. Initialized in case of
18145 *   error only.
18146 *
18147 * @return
18148 *   0 on success, otherwise negative errno value.
18149 */
18150int
18151flow_dv_action_validate(struct rte_eth_dev *dev,
18152                        const struct rte_flow_indir_action_conf *conf,
18153                        const struct rte_flow_action *action,
18154                        struct rte_flow_error *err)
18155{
18156        struct mlx5_priv *priv = dev->data->dev_private;
18157
18158        RTE_SET_USED(conf);
18159        switch (action->type) {
18160        case RTE_FLOW_ACTION_TYPE_RSS:
18161                /*
18162                 * priv->obj_ops is set according to driver capabilities.
18163                 * When DevX capabilities are sufficient, it is set to
18164                 * devx_obj_ops.
18165                 * Otherwise, it is set to ibv_obj_ops.
18166                 * ibv_obj_ops doesn't support ind_table_modify operation.
18167                 * In this case the indirect RSS action can't be used.
18168                 */
18169                if (priv->obj_ops.ind_table_modify == NULL)
18170                        return rte_flow_error_set
18171                                        (err, ENOTSUP,
18172                                         RTE_FLOW_ERROR_TYPE_ACTION,
18173                                         NULL,
18174                                         "Indirect RSS action not supported");
18175                return mlx5_validate_action_rss(dev, action, err);
18176        case RTE_FLOW_ACTION_TYPE_AGE:
18177                if (!priv->sh->aso_age_mng)
18178                        return rte_flow_error_set(err, ENOTSUP,
18179                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
18180                                                NULL,
18181                                                "Indirect age action not supported");
18182                return flow_dv_validate_action_age(0, action, dev, err);
18183        case RTE_FLOW_ACTION_TYPE_COUNT:
18184                return flow_dv_validate_action_count(dev, true, 0, NULL, err);
18185        case RTE_FLOW_ACTION_TYPE_CONNTRACK:
18186                if (!priv->sh->ct_aso_en)
18187                        return rte_flow_error_set(err, ENOTSUP,
18188                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
18189                                        "ASO CT is not supported");
18190                return mlx5_validate_action_ct(dev, action->conf, err);
18191        default:
18192                return rte_flow_error_set(err, ENOTSUP,
18193                                          RTE_FLOW_ERROR_TYPE_ACTION,
18194                                          NULL,
18195                                          "action type not supported");
18196        }
18197}
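
/*
 * An application-side sketch: the dispatcher above runs when an
 * indirect action handle is created through the generic API. For
 * example, a shared COUNT action for ingress flows:
 */
static inline struct rte_flow_action_handle *
create_shared_count(uint16_t port_id)
{
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error error;

	return rte_flow_action_handle_create(port_id, &conf, &action, &error);
}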
18198
18199/*
18200 * Check if the RSS configurations for colors of a meter policy match
18201 * each other, except the queues.
18202 *
18203 * @param[in] r1
18204 *   Pointer to the first RSS flow action.
18205 * @param[in] r2
18206 *   Pointer to the second RSS flow action.
18207 *
18208 * @return
18209 *   0 on match, 1 on conflict.
18210 */
18211static inline int
18212flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
18213                               const struct rte_flow_action_rss *r2)
18214{
18215        if (r1 == NULL || r2 == NULL)
18216                return 0;
18217        if (!(r1->level <= 1 && r2->level <= 1) &&
18218            !(r1->level > 1 && r2->level > 1))
18219                return 1;
18220        if (r1->types != r2->types &&
18221            !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
18222              (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
18223                return 1;
18224        if (r1->key || r2->key) {
18225                const void *key1 = r1->key ? r1->key : rss_hash_default_key;
18226                const void *key2 = r2->key ? r2->key : rss_hash_default_key;
18227
18228                if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
18229                        return 1;
18230        }
18231        return 0;
18232}
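
/*
 * An illustration of the comparison above (hypothetical values): equal
 * levels and keys, and types 0 is treated as RTE_ETH_RSS_IP, so the
 * two configurations match even though they spread to different queues.
 */
static inline int
mtr_policy_rss_compare_example(void)
{
	static const uint16_t queues_g[] = { 0, 1 };
	static const uint16_t queues_y[] = { 2, 3 };
	const struct rte_flow_action_rss rss_green = {
		.types = RTE_ETH_RSS_IP,
		.queue = queues_g,
		.queue_num = RTE_DIM(queues_g),
	};
	const struct rte_flow_action_rss rss_yellow = {
		.types = 0, /* Treated as RTE_ETH_RSS_IP by the compare. */
		.queue = queues_y,
		.queue_num = RTE_DIM(queues_y),
	};

	/* Returns 0: a match, the queue sets are deliberately ignored. */
	return flow_dv_mtr_policy_rss_compare(&rss_green, &rss_yellow);
}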
18233
18234/**
18235 * Validate the meter hierarchy chain for meter policy.
18236 *
18237 * @param[in] dev
18238 *   Pointer to the Ethernet device structure.
18239 * @param[in] meter_id
18240 *   Meter id.
18241 * @param[in] action_flags
18242 *   Holds the actions detected until now.
18243 * @param[out] is_rss
18244 *   Is RSS or not.
18245 * @param[out] hierarchy_domain
18246 *   The domain bitmap for hierarchy policy.
18247 * @param[out] error
18248 *   Perform verbose error reporting if not NULL. Initialized in case of
18249 *   error only.
18250 *
18251 * @return
18252 *   0 on success, otherwise negative errno value with error set.
18253 */
18254static int
18255flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
18256                                  uint32_t meter_id,
18257                                  uint64_t action_flags,
18258                                  bool *is_rss,
18259                                  uint8_t *hierarchy_domain,
18260                                  struct rte_mtr_error *error)
18261{
18262        struct mlx5_priv *priv = dev->data->dev_private;
18263        struct mlx5_flow_meter_info *fm;
18264        struct mlx5_flow_meter_policy *policy;
18265        uint8_t cnt = 1;
18266
18267        if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
18268                            MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18269                return -rte_mtr_error_set(error, EINVAL,
18270                                        RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
18271                                        NULL,
18272                                        "Multiple fate actions not supported.");
18273        *hierarchy_domain = 0;
18274        fm = mlx5_flow_meter_find(priv, meter_id, NULL);
18275        while (true) {
18276                if (!fm)
18277                        return -rte_mtr_error_set(error, EINVAL,
18278                                                RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
18279                                        "Meter not found in meter hierarchy.");
18280                if (fm->def_policy)
18281                        return -rte_mtr_error_set(error, EINVAL,
18282                                        RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
18283                        "Non termination meter not supported in hierarchy.");
18284                if (!fm->shared)
18285                        return -rte_mtr_error_set(error, EINVAL,
18286                                        RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
18287                                        "Only shared meter supported in hierarchy.");
18288                policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
18289                MLX5_ASSERT(policy);
18290                /**
18291                 * Only inherit the supported domains of the first meter in
18292                 * hierarchy.
18293                 * One meter supports at least one domain.
18294                 */
18295                if (!*hierarchy_domain) {
18296                        if (policy->transfer)
18297                                *hierarchy_domain |=
18298                                                MLX5_MTR_DOMAIN_TRANSFER_BIT;
18299                        if (policy->ingress)
18300                                *hierarchy_domain |=
18301                                                MLX5_MTR_DOMAIN_INGRESS_BIT;
18302                        if (policy->egress)
18303                                *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
18304                }
18305                if (!policy->is_hierarchy) {
18306                        *is_rss = policy->is_rss;
18307                        break;
18308                }
18309                rte_spinlock_lock(&policy->sl);
18310                fm = mlx5_flow_meter_hierarchy_next_meter(priv, policy, NULL);
18311                rte_spinlock_unlock(&policy->sl);
18312                if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
18313                        return -rte_mtr_error_set(error, EINVAL,
18314                                        RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
18315                                        "Exceed max hierarchy meter number.");
18316        }
18317        return 0;
18318}
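
/*
 * An application-side sketch of a hierarchy accepted by the walk above:
 * the green color of one policy feeds the next shared, terminating
 * meter through a METER action. Ids are arbitrary example values.
 */
static inline int
add_hierarchy_policy(uint16_t port_id, uint32_t policy_id,
		     uint32_t next_mtr_id, struct rte_mtr_error *error)
{
	const struct rte_flow_action_meter next_meter = {
		.mtr_id = next_mtr_id,
	};
	const struct rte_flow_action green_acts[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &next_meter },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action red_acts[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_mtr_meter_policy_params params = {
		.actions = {
			[RTE_COLOR_GREEN] = green_acts,
			[RTE_COLOR_YELLOW] = NULL,
			[RTE_COLOR_RED] = red_acts,
		},
	};

	return rte_mtr_meter_policy_add(port_id, policy_id, &params, error);
}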
18319
18320/**
18321 * Validate meter policy actions.
18322 * Dispatcher for action type specific validation.
18323 *
18324 * @param[in] dev
18325 *   Pointer to the Ethernet device structure.
18326 * @param[in] action
18327 *   The meter policy action object to validate.
18328 * @param[in] attr
18329 *   Attributes of flow to determine steering domain.
18330 * @param[out] error
18331 *   Perform verbose error reporting if not NULL. Initialized in case of
18332 *   error only.
18333 *
18334 * @return
18335 *   0 on success, otherwise negative errno value.
18336 */
18337static int
18338flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
18339                        const struct rte_flow_action *actions[RTE_COLORS],
18340                        struct rte_flow_attr *attr,
18341                        bool *is_rss,
18342                        uint8_t *domain_bitmap,
18343                        uint8_t *policy_mode,
18344                        struct rte_mtr_error *error)
18345{
18346        struct mlx5_priv *priv = dev->data->dev_private;
18347        struct mlx5_sh_config *dev_conf = &priv->sh->config;
18348        const struct rte_flow_action *act;
18349        uint64_t action_flags[RTE_COLORS] = {0};
18350        int actions_n;
18351        int i, ret;
18352        struct rte_flow_error flow_err;
18353        uint8_t domain_color[RTE_COLORS] = {0};
18354        uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
18355        uint8_t hierarchy_domain = 0;
18356        const struct rte_flow_action_meter *mtr;
18357        const struct rte_flow_action_meter *next_mtr = NULL;
18358        bool def_green = false;
18359        bool def_yellow = false;
18360        const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
18361
18362        if (!dev_conf->dv_esw_en)
18363                def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
18364        *domain_bitmap = def_domain;
18365        /* Red color only supports the DROP action. */
18366        if (!actions[RTE_COLOR_RED] ||
18367            actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
18368                return -rte_mtr_error_set(error, ENOTSUP,
18369                                RTE_MTR_ERROR_TYPE_METER_POLICY,
18370                                NULL, "Red color only supports drop action.");
18371        /*
18372         * Check default policy actions:
18373         * Green / Yellow: no action, Red: drop action
18374         * Either G or Y will trigger default policy actions to be created.
18375         */
18376        if (!actions[RTE_COLOR_GREEN] ||
18377            actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
18378                def_green = true;
18379        if (!actions[RTE_COLOR_YELLOW] ||
18380            actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
18381                def_yellow = true;
18382        if (def_green && def_yellow) {
18383                *policy_mode = MLX5_MTR_POLICY_MODE_DEF;
18384                return 0;
18385        } else if (!def_green && def_yellow) {
18386                *policy_mode = MLX5_MTR_POLICY_MODE_OG;
18387        } else if (def_green && !def_yellow) {
18388                *policy_mode = MLX5_MTR_POLICY_MODE_OY;
18389        } else {
18390                *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
18391        }
18392        /* Set to empty string in case of NULL pointer access by user. */
18393        flow_err.message = "";
18394        for (i = 0; i < RTE_COLORS; i++) {
18395                act = actions[i];
18396                for (action_flags[i] = 0, actions_n = 0;
18397                     act && act->type != RTE_FLOW_ACTION_TYPE_END;
18398                     act++) {
18399                        if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
18400                                return -rte_mtr_error_set(error, ENOTSUP,
18401                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
18402                                          NULL, "too many actions");
18403                        switch (act->type) {
18404                        case RTE_FLOW_ACTION_TYPE_PORT_ID:
18405                        case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
18406                                if (!dev_conf->dv_esw_en)
18407                                        return -rte_mtr_error_set(error,
18408                                        ENOTSUP,
18409                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18410                                        NULL, "PORT action validate check"
18411                                        " fail for ESW disable");
18412                                ret = flow_dv_validate_action_port_id(dev,
18413                                                action_flags[i],
18414                                                act, attr, &flow_err);
18415                                if (ret)
18416                                        return -rte_mtr_error_set(error,
18417                                        ENOTSUP,
18418                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18419                                        NULL, flow_err.message ?
18420                                        flow_err.message :
18421                                        "PORT action validate check fail");
18422                                ++actions_n;
18423                                action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
18424                                break;
18425                        case RTE_FLOW_ACTION_TYPE_MARK:
18426                                ret = flow_dv_validate_action_mark(dev, act,
18427                                                           action_flags[i],
18428                                                           attr, &flow_err);
18429                                if (ret < 0)
18430                                        return -rte_mtr_error_set(error,
18431                                        ENOTSUP,
18432                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18433                                        NULL, flow_err.message ?
18434                                        flow_err.message :
18435                                        "Mark action validate check fail");
18436                                if (dev_conf->dv_xmeta_en !=
18437                                        MLX5_XMETA_MODE_LEGACY)
18438                                        return -rte_mtr_error_set(error,
18439                                        ENOTSUP,
18440                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18441                                        NULL, "Extend MARK action is "
18442                                        "not supported. Please try use "
18443                                        "default policy for meter.");
18444                                action_flags[i] |= MLX5_FLOW_ACTION_MARK;
18445                                ++actions_n;
18446                                break;
18447                        case RTE_FLOW_ACTION_TYPE_SET_TAG:
18448                                ret = flow_dv_validate_action_set_tag(dev,
18449                                                        act, action_flags[i],
18450                                                        attr, &flow_err);
18451                                if (ret)
18452                                        return -rte_mtr_error_set(error,
18453                                        ENOTSUP,
18454                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18455                                        NULL, flow_err.message ?
18456                                        flow_err.message :
18457                                        "Set tag action validate check fail");
18458                                action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
18459                                ++actions_n;
18460                                break;
18461                        case RTE_FLOW_ACTION_TYPE_DROP:
18462                                ret = mlx5_flow_validate_action_drop
18463                                        (action_flags[i], attr, &flow_err);
18464                                if (ret < 0)
18465                                        return -rte_mtr_error_set(error,
18466                                        ENOTSUP,
18467                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18468                                        NULL, flow_err.message ?
18469                                        flow_err.message :
18470                                        "Drop action validate check fail");
18471                                action_flags[i] |= MLX5_FLOW_ACTION_DROP;
18472                                ++actions_n;
18473                                break;
18474                        case RTE_FLOW_ACTION_TYPE_QUEUE:
18475                                /*
18476                                 * Check whether extensive
18477                                 * metadata feature is engaged.
18478                                 */
18479                                if (dev_conf->dv_flow_en &&
18480                                    (dev_conf->dv_xmeta_en !=
18481                                     MLX5_XMETA_MODE_LEGACY) &&
18482                                    mlx5_flow_ext_mreg_supported(dev))
18483                                        return -rte_mtr_error_set(error,
18484                                          ENOTSUP,
18485                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
18486                                          NULL, "Queue action with meta "
18487                                          "is not supported. Please try use "
18488                                          "default policy for meter.");
18489                                ret = mlx5_flow_validate_action_queue(act,
18490                                                        action_flags[i], dev,
18491                                                        attr, &flow_err);
18492                                if (ret < 0)
18493                                        return -rte_mtr_error_set(error,
18494                                          ENOTSUP,
18495                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
18496                                          NULL, flow_err.message ?
18497                                          flow_err.message :
18498                                          "Queue action validate check fail");
18499                                action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
18500                                ++actions_n;
18501                                break;
18502                        case RTE_FLOW_ACTION_TYPE_RSS:
18503                                if (dev_conf->dv_flow_en &&
18504                                    (dev_conf->dv_xmeta_en !=
18505                                     MLX5_XMETA_MODE_LEGACY) &&
18506                                    mlx5_flow_ext_mreg_supported(dev))
18507                                        return -rte_mtr_error_set(error,
18508                                          ENOTSUP,
18509                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
18510                                          NULL, "RSS action with meta "
18511                                          "is not supported. Please try use "
18512                                          "default policy for meter.");
18513                                ret = mlx5_validate_action_rss(dev, act,
18514                                                               &flow_err);
18515                                if (ret < 0)
18516                                        return -rte_mtr_error_set(error,
18517                                          ENOTSUP,
18518                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
18519                                          NULL, flow_err.message ?
18520                                          flow_err.message :
18521                                          "RSS action validate check fail");
18522                                action_flags[i] |= MLX5_FLOW_ACTION_RSS;
18523                                ++actions_n;
18524                                /* Either G or Y will set the RSS. */
18525                                rss_color[i] = act->conf;
18526                                break;
18527                        case RTE_FLOW_ACTION_TYPE_JUMP:
18528                                ret = flow_dv_validate_action_jump(dev,
18529                                        NULL, act, action_flags[i],
18530                                        attr, true, &flow_err);
18531                                if (ret)
18532                                        return -rte_mtr_error_set(error,
18533                                          ENOTSUP,
18534                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
18535                                          NULL, flow_err.message ?
18536                                          flow_err.message :
18537                                          "Jump action validate check fail");
18538                                ++actions_n;
18539                                action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
18540                                break;
18541                        case RTE_FLOW_ACTION_TYPE_METER:
18542                                mtr = act->conf;
18543                                if (next_mtr && next_mtr->mtr_id != mtr->mtr_id)
18544                                        return -rte_mtr_error_set(error, ENOTSUP,
18545                                                RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
18546                                                "Green and Yellow must use the same meter.");
18547                                ret = flow_dv_validate_policy_mtr_hierarchy(dev,
18548                                                        mtr->mtr_id,
18549                                                        action_flags[i],
18550                                                        is_rss,
18551                                                        &hierarchy_domain,
18552                                                        error);
18553                                if (ret)
18554                                        return ret;
18555                                ++actions_n;
18556                                action_flags[i] |=
18557                                MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
18558                                next_mtr = mtr;
18559                                break;
18560                        case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
18561                                ret = flow_dv_validate_action_modify_field(dev,
18562                                        action_flags[i], act, attr, &flow_err);
18563                                if (ret < 0)
18564                                        return -rte_mtr_error_set(error,
18565                                          ENOTSUP,
18566                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
18567                                          NULL, flow_err.message ?
18568                                          flow_err.message :
18569                                          "Modify field action validate check fail");
18570                                ++actions_n;
18571                                action_flags[i] |= MLX5_FLOW_ACTION_MODIFY_FIELD;
18572                                break;
18573                        default:
18574                                return -rte_mtr_error_set(error, ENOTSUP,
18575                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18576                                        NULL,
18577                                        "Doesn't support optional action");
18578                        }
18579                }
18580                if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
18581                        domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
18582                } else if ((action_flags[i] &
18583                          (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
18584                          (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
18585                        /*
18586                         * Only MLX5_XMETA_MODE_LEGACY is supported,
18587                         * so the MARK action is valid only in the ingress domain.
18588                         */
18589                        domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
18590                } else {
18591                        domain_color[i] = def_domain;
18592                        if (action_flags[i] &&
18593                            !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18594                                domain_color[i] &=
18595                                ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
18596                }
18597                if (action_flags[i] &
18598                    MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
18599                        domain_color[i] &= hierarchy_domain;
18600                /*
18601                 * Non-termination actions only support NIC Tx domain.
18602                 * The adjustment should be skipped when there is no
18603                 * action or only END is provided. The default domains
18604                 * bit-mask is set to find the MIN intersection.
18605                 * The action flags checking should also be skipped.
18606                 */
18607                if ((def_green && i == RTE_COLOR_GREEN) ||
18608                    (def_yellow && i == RTE_COLOR_YELLOW))
18609                        continue;
18610                /*
18611                 * Validate the drop action mutual exclusion
18612                 * with other actions. Drop action is mutually-exclusive
18613                 * with any other action, except for Count action.
18614                 */
18615                if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
18616                    (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
18617                        return -rte_mtr_error_set(error, ENOTSUP,
18618                                RTE_MTR_ERROR_TYPE_METER_POLICY,
18619                                NULL, "Drop action is mutually-exclusive "
18620                                "with any other action");
18621                }
18622                /* E-Switch has a few restrictions on using items and actions. */
18623                if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
18624                        if (!mlx5_flow_ext_mreg_supported(dev) &&
18625                            action_flags[i] & MLX5_FLOW_ACTION_MARK)
18626                                return -rte_mtr_error_set(error, ENOTSUP,
18627                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18628                                        NULL, "unsupported action MARK");
18629                        if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
18630                                return -rte_mtr_error_set(error, ENOTSUP,
18631                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18632                                        NULL, "unsupported action QUEUE");
18633                        if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
18634                                return -rte_mtr_error_set(error, ENOTSUP,
18635                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18636                                        NULL, "unsupported action RSS");
18637                        if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18638                                return -rte_mtr_error_set(error, ENOTSUP,
18639                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
18640                                        NULL, "no fate action is found");
18641                } else {
18642                        if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
18643                            (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
18644                                if ((domain_color[i] &
18645                                     MLX5_MTR_DOMAIN_EGRESS_BIT))
18646                                        domain_color[i] =
18647                                                MLX5_MTR_DOMAIN_EGRESS_BIT;
18648                                else
18649                                        return -rte_mtr_error_set(error,
18650                                                ENOTSUP,
18651                                                RTE_MTR_ERROR_TYPE_METER_POLICY,
18652                                                NULL,
18653                                                "no fate action is found");
18654                        }
18655                }
	}
	if (next_mtr && *policy_mode == MLX5_MTR_POLICY_MODE_ALL) {
		if (!(action_flags[RTE_COLOR_GREEN] & action_flags[RTE_COLOR_YELLOW] &
		      MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY))
			return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY,
						  NULL,
						  "Meter hierarchy supports meter action only.");
	}
	/* If both colors have RSS, the attributes should be the same. */
	if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
					   rss_color[RTE_COLOR_YELLOW]))
		return -rte_mtr_error_set(error, EINVAL,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "policy RSS attr conflict");
	if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
		*is_rss = true;
	/* "domain_color[C]" is non-zero per color; default is all domains. */
	if (!def_green && !def_yellow &&
	    domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
	    !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
	    !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
		return -rte_mtr_error_set(error, EINVAL,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "policy domains conflict");
	/*
	 * At least one color policy is listed in the actions; the supported
	 * domains are the intersection of the per-color domains.
	 */
	*domain_bitmap = domain_color[RTE_COLOR_GREEN] &
			 domain_color[RTE_COLOR_YELLOW];
	return 0;
}

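/**
 * Flush the cached flow rules of the given steering domains to the HW.
 * Implements the .sync_domain callback of the flow driver ops.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] domains
 *   Bitmap of MLX5_DOMAIN_BIT_* values selecting the domains to sync.
 * @param[in] flags
 *   Flags forwarded to the per-domain rdma-core sync call.
 *
 * @return
 *   0 on success, a non-zero value propagated from
 *   mlx5_os_flow_dr_sync_domain() otherwise.
 */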
static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;

	if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
		ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain, flags);
		if (ret != 0)
			return ret;
	}
	if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
		ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
		if (ret != 0)
			return ret;
	}
	if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
		ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
		if (ret != 0)
			return ret;
	}
	return 0;
}

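/*
 * For reference: the callback above is reached through the .sync_domain
 * op, which this PMD exposes to applications as rte_pmd_mlx5_sync_flow()
 * (declared in rte_pmd_mlx5.h). A minimal caller sketch, assuming port 0
 * is an mlx5 device:
 *
 *	ret = rte_pmd_mlx5_sync_flow(0, MLX5_DOMAIN_BIT_NIC_RX |
 *					MLX5_DOMAIN_BIT_NIC_TX);
 *
 * A zero return means the selected domains were flushed to the HW.
 */
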
/**
 * Discover the number of available flow priorities
 * by trying to create a flow with the highest priority value
 * for each candidate number.
 *
 * @param[in] dev
 *   Ethernet device.
 * @param[in] vprio
 *   Candidate numbers of available priorities to probe.
 * @param[in] vprio_n
 *   Size of @p vprio array.
 * @return
 *   On success, the number of available flow priorities.
 *   On failure, a negative errno-style code; rte_errno is set as well.
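 *
 * A usage sketch (hypothetical caller shown for illustration; the
 * driver core probes through the .discover_priorities op with a
 * similar candidate list):
 *
 * @code{.c}
 *	static const uint16_t vprio[] = { 8, 16 };
 *	int n = flow_dv_discover_priorities(dev, vprio, RTE_DIM(vprio));
 *
 *	if (n < 0)
 *		return n; // rte_errno is already set
 * @endcode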
 */
static int
flow_dv_discover_priorities(struct rte_eth_dev *dev,
			    const uint16_t *vprio, int vprio_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
	struct rte_flow_item_eth eth;
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &eth,
		.mask = &eth,
	};
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
	};
	union mlx5_flow_tbl_key tbl_key;
	struct mlx5_flow flow;
	void *action;
	struct rte_flow_error error;
	uint8_t misc_mask;
	int i, err, ret = -ENOTSUP;

	/*
	 * Prepare a flow with a catch-all pattern and a drop action.
	 * Use drop queue, because shared drop action may be unavailable.
	 */
	action = priv->drop_queue.hrxq->action;
	if (action == NULL) {
		DRV_LOG(ERR, "Priority discovery requires a drop action");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	memset(&flow, 0, sizeof(flow));
	flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
	if (flow.handle == NULL) {
		DRV_LOG(ERR, "Cannot create flow handle");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	flow.ingress = true;
	flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
	flow.dv.actions[0] = action;
	flow.dv.actions_n = 1;
	memset(&eth, 0, sizeof(eth));
	flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
				   &item, /* inner */ false, /* group */ 0);
	matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
	for (i = 0; i < vprio_n; i++) {
		/* Configure the next proposed maximum priority. */
		matcher.priority = vprio[i] - 1;
		memset(&tbl_key, 0, sizeof(tbl_key));
		err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
					       /* tunnel */ NULL,
					       /* group */ 0,
					       &error);
		if (err != 0) {
			/* Matcher registration is pure SW and must always succeed. */
			DRV_LOG(ERR, "Cannot register matcher");
			ret = -rte_errno;
			break;
		}
		/* Try to apply the flow to HW. */
		misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
		__flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
		err = mlx5_flow_os_create_flow
				(flow.handle->dvh.matcher->matcher_object,
				 (void *)&flow.dv.value, flow.dv.actions_n,
				 flow.dv.actions, &flow.handle->drv_flow);
		if (err == 0) {
			claim_zero(mlx5_flow_os_destroy_flow
						(flow.handle->drv_flow));
			flow.handle->drv_flow = NULL;
		}
		claim_zero(flow_dv_matcher_release(dev, flow.handle));
		if (err != 0)
			break;
		ret = vprio[i];
	}
	mlx5_ipool_free(pool, flow.handle_idx);
	/* Set rte_errno if no expected priority value matched. */
	if (ret < 0)
		rte_errno = -ret;
	return ret;
}

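/*
 * DV flow engine callbacks. The generic flow layer (mlx5_flow.c) selects
 * this table through flow_get_drv_ops() when the DV/DR engine is in use.
 */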
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	.create_mtr_tbls = flow_dv_create_mtr_tbls,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
	.destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
	.create_meter = flow_dv_mtr_alloc,
	.free_meter = flow_dv_aso_mtr_release_to_pool,
	.validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
	.create_mtr_acts = flow_dv_create_mtr_policy_acts,
	.destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
	.create_policy_rules = flow_dv_create_policy_rules,
	.destroy_policy_rules = flow_dv_destroy_policy_rules,
	.create_def_policy = flow_dv_create_def_policy,
	.destroy_def_policy = flow_dv_destroy_def_policy,
	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
	.meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
	.destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_dv_get_aged_flows,
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	.sync_domain = flow_dv_sync_domain,
	.discover_priorities = flow_dv_discover_priorities,
	.item_create = flow_dv_item_create,
	.item_release = flow_dv_item_release,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
