dpdk/drivers/net/mlx5/mlx5_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015-2019 Mellanox Technologies, Ltd
 */

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_hexdump.h>

#include <mlx5_prm.h>
#include <mlx5_common.h>

#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"

/* static asserts */
static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
                (sizeof(uint16_t) +
                 sizeof(rte_v128u32_t)),
                "invalid Ethernet Segment data size");
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
                (sizeof(uint16_t) +
                 sizeof(struct rte_vlan_hdr) +
                 2 * RTE_ETHER_ADDR_LEN),
                "invalid Ethernet Segment data size");
static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
                (2 * RTE_ETHER_ADDR_LEN),
                "invalid Data Segment data size");
static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
static_assert((sizeof(struct rte_vlan_hdr) +
                        sizeof(struct rte_ether_hdr)) ==
                MLX5_ESEG_MIN_INLINE_SIZE,
                "invalid min inline data size");
static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
                MLX5_DSEG_MAX, "invalid WQE max size");
static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
                "invalid WQE Control Segment size");
static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
                "invalid WQE Ethernet Segment size");
static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
                "invalid WQE Data Segment size");
static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
                "invalid WQE size");

uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
        [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};

uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;

uint64_t rte_net_mlx5_dynf_inline_mask;
/**
 * Build a table to translate Rx completion flags to packet type.
 *
 * @note mlx5_dev_supported_ptypes_get() must be kept in sync with any
 * change made here.
 */
void
mlx5_set_ptype_table(void)
{
        unsigned int i;
        uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;

        /* Last entry must not be overwritten, reserved for errored packet. */
        for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
                (*p)[i] = RTE_PTYPE_UNKNOWN;
        /*
         * The index to the array should have:
         * bit[1:0] = l3_hdr_type
         * bit[4:2] = l4_hdr_type
         * bit[5] = ip_frag
         * bit[6] = tunneled
         * bit[7] = outer_l3_type
         */
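        /*
         * Worked example based on the layout above: index 0x46 is
         * 0b01000110, i.e. tunneled (bit 6), l4_hdr_type = 1 (TCP) and
         * l3_hdr_type = 2 (IPv4), so the 0x46 entry below describes an
         * IPv4 tunnel carrying an inner IPv4/TCP packet.
         */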
        /* L2 */
        (*p)[0x00] = RTE_PTYPE_L2_ETHER;
        /* L3 */
        (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_NONFRAG;
        (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_NONFRAG;
        /* Fragmented */
        (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_FRAG;
        (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_FRAG;
        /* TCP */
        (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        /* UDP */
        (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_UDP;
        (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_UDP;
        /* Repeat with outer_l3_type being set. Just in case. */
        (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_NONFRAG;
        (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_NONFRAG;
        (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_FRAG;
        (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_FRAG;
        (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
        (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_UDP;
        (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_UDP;
        /* Tunneled - L3 */
        (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
        (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_NONFRAG;
        (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_NONFRAG;
        (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
        (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_NONFRAG;
        (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_NONFRAG;
        /* Tunneled - Fragmented */
        (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_FRAG;
        (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_FRAG;
        (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_FRAG;
        (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_FRAG;
        /* Tunneled - TCP */
        (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        /* Tunneled - UDP */
        (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_UDP;
        (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_UDP;
        (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_UDP;
        (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_UDP;
}
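
/*
 * Lookup sketch (cqe_to_idx() is a hypothetical helper): the Rx burst
 * path condenses the CQE l3/l4/frag/tunnel flags into the 8-bit index
 * described above and reads the table directly, e.g.
 *
 *      pkt->packet_type = mlx5_ptype_table[cqe_to_idx(cqe)];
 */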

/**
 * Build a table to translate mbuf checksum offload requests to the
 * checksum type used by Verbs.
 */
void
mlx5_set_cksum_table(void)
{
        unsigned int i;
        uint8_t v;

        /*
         * The index should have:
         * bit[0] = PKT_TX_TCP_SEG
         * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
         * bit[4] = PKT_TX_IP_CKSUM
         * bit[8] = PKT_TX_OUTER_IP_CKSUM
         * bit[9] = tunnel
         */
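        /*
         * Worked example, assuming the standard rte_mbuf PKT_TX_* bit
         * positions: the index is ol_flags shifted down so that
         * PKT_TX_TCP_SEG lands on bit 0. A tunneled packet requesting
         * inner IP and TCP checksums yields
         * i = (1 << 9) | (1 << 4) | (1 << 2), for which the loop below
         * stores MLX5_ETH_WQE_L3_INNER_CSUM | MLX5_ETH_WQE_L4_INNER_CSUM.
         */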
        for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
                v = 0;
                if (i & (1 << 9)) {
                        /* Tunneled packet. */
                        if (i & (1 << 8)) /* Outer IP. */
                                v |= MLX5_ETH_WQE_L3_CSUM;
                        if (i & (1 << 4)) /* Inner IP. */
                                v |= MLX5_ETH_WQE_L3_INNER_CSUM;
                        if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
                                v |= MLX5_ETH_WQE_L4_INNER_CSUM;
                } else {
                        /* No tunnel. */
                        if (i & (1 << 4)) /* IP. */
                                v |= MLX5_ETH_WQE_L3_CSUM;
                        if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
                                v |= MLX5_ETH_WQE_L4_CSUM;
                }
                mlx5_cksum_table[i] = v;
        }
}

/**
 * Build a table to translate the packet type of an mbuf to the SWP
 * (software parser) type used by Verbs.
 */
void
mlx5_set_swp_types_table(void)
{
        unsigned int i;
        uint8_t v;

        /*
         * The index should have:
         * bit[0:1] = PKT_TX_L4_MASK
         * bit[4] = PKT_TX_IPV6
         * bit[8] = PKT_TX_OUTER_IPV6
         * bit[9] = PKT_TX_OUTER_UDP
         */
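        /*
         * Example derived from the layout above: an outer UDP tunnel
         * over IPv4 carrying an inner IPv6/UDP packet sets bit 9, bit 4
         * and PKT_TX_UDP_CKSUM in bits [1:0], so the loop below stores
         * MLX5_ETH_WQE_L4_OUTER_UDP | MLX5_ETH_WQE_L3_INNER_IPV6 |
         * MLX5_ETH_WQE_L4_INNER_UDP for that index.
         */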
        for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
                v = 0;
                if (i & (1 << 8))
                        v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
                if (i & (1 << 9))
                        v |= MLX5_ETH_WQE_L4_OUTER_UDP;
                if (i & (1 << 4))
                        v |= MLX5_ETH_WQE_L3_INNER_IPV6;
                if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
                        v |= MLX5_ETH_WQE_L4_INNER_UDP;
                mlx5_swp_types_table[i] = v;
        }
}

#define MLX5_SYSTEM_LOG_DIR "/var/log"
/**
 * Dump debug information to a log file.
 *
 * @param fname
 *   The file name.
 * @param hex_title
 *   If not NULL, this string is printed as a header to the output
 *   and the output is rendered as a hexadecimal view.
 * @param buf
 *   The address of the buffer to print out.
 * @param hex_len
 *   The number of bytes to dump out.
 */
void
mlx5_dump_debug_information(const char *fname, const char *hex_title,
                            const void *buf, unsigned int hex_len)
{
        FILE *fd;

        MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
        fd = fopen(path, "a+");
        if (!fd) {
                DRV_LOG(WARNING, "cannot open %s for debug dump", path);
                MKSTR(path2, "./%s", fname);
                fd = fopen(path2, "a+");
                if (!fd) {
                        DRV_LOG(ERR, "cannot open %s for debug dump", path2);
                        return;
                }
                DRV_LOG(INFO, "New debug dump in file %s", path2);
        } else {
                DRV_LOG(INFO, "New debug dump in file %s", path);
        }
        if (hex_title)
                rte_hexdump(fd, hex_title, buf, hex_len);
        else
                fprintf(fd, "%s", (const char *)buf);
        fprintf(fd, "\n\n\n");
        fclose(fd);
}
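
/*
 * Usage sketch (hypothetical names): dump an errored completion queue
 * entry, appending to /var/log/mlx5_err_dump:
 *
 *      mlx5_dump_debug_information("mlx5_err_dump", "Error CQE:",
 *                                  cqe, sizeof(*cqe));
 */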

/**
 * Modify a Verbs/DevX queue state.
 * This must be called from the primary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sm
 *   State modify request parameters.
 *
 * @return
 *   0 on success, a non-zero value otherwise and rte_errno is set.
 */
int
mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
                        const struct mlx5_mp_arg_queue_state_modify *sm)
{
        int ret;
        struct mlx5_priv *priv = dev->data->dev_private;

        if (sm->is_wq) {
                struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of(rxq, struct mlx5_rxq_ctrl, rxq);

                ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, sm->state);
                if (ret) {
                        DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
                                sm->state, strerror(errno));
                        rte_errno = errno;
                        return ret;
                }
        } else {
                struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
                struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq, struct mlx5_txq_ctrl, txq);

                ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
                                                   MLX5_TXQ_MOD_ERR2RDY,
                                                   (uint8_t)priv->dev_port);
                if (ret)
                        return ret;
        }
        return 0;
}

/**
 * Modify a Verbs/DevX queue state, dispatching the request to the
 * primary process when invoked from a secondary one.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sm
 *   State modify request parameters.
 *
 * @return
 *   0 on success, a non-zero value otherwise.
 */
int
mlx5_queue_state_modify(struct rte_eth_dev *dev,
                        struct mlx5_mp_arg_queue_state_modify *sm)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret = 0;

        switch (rte_eal_process_type()) {
        case RTE_PROC_PRIMARY:
                ret = mlx5_queue_state_modify_primary(dev, sm);
                break;
        case RTE_PROC_SECONDARY:
                ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
                break;
        default:
                break;
        }
        return ret;
}
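
/*
 * Usage sketch, assuming the mlx5_mp_arg_queue_state_modify layout of
 * is_wq/queue_id/state (rxq and dev are hypothetical variables): reset
 * an errored Rx WQ from a datapath error handler.
 *
 *      struct mlx5_mp_arg_queue_state_modify sm = {
 *              .is_wq = 1,
 *              .queue_id = rxq->idx,
 *              .state = IBV_WQS_RESET,
 *      };
 *
 *      if (mlx5_queue_state_modify(dev, &sm))
 *              return -1;
 */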