dpdk/lib/ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned int offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
        RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
        uint64_t offload;
        const char *name;
} rte_eth_dev_capa_names[] = {
        {RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
        {RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
        {RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
        {RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
        {RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs;
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
                return -EINVAL;
        }

        if (devargs_str == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot initialize iterator from NULL device description string\n");
                return -EINVAL;
        }

        memset(iter, 0, sizeof(*iter));
        memset(&devargs, 0, sizeof(devargs));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle a pure class filter (i.e. without any bus-level argument),
         * from the future new syntax.
         * rte_devargs_parse() does not yet support the new syntax,
         * which is why this simple case is parsed here for now.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters in the old syntax can match only at the ethdev
         * level. Extra parameters will be ignored, thanks to the "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if ((strcmp(iter->bus->name, "vdev") == 0) ||
                (strcmp(iter->bus->name, "fslmc") == 0) ||
                (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        rte_devargs_reset(&devargs);
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        rte_devargs_reset(&devargs);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot get next device from NULL iterator\n");
                return RTE_MAX_ETHPORTS;
        }

        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not a pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in the middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get the next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidates */
                }
                /* The device matches the bus part; now check the ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try the next rte_device */

        /* No more ethdev ports to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
                return;
        }

        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}

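/*
 * Usage sketch for the iterator trio above (illustrative only, not part of
 * this file; the devargs string is a hypothetical example):
 *
 *      struct rte_dev_iterator iter;
 *      uint16_t port_id;
 *
 *      if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *              for (port_id = rte_eth_iterator_next(&iter);
 *                   port_id != RTE_MAX_ETHPORTS;
 *                   port_id = rte_eth_iterator_next(&iter))
 *                      printf("matched port %u\n", port_id);
 *      }
 *
 * rte_eth_iterator_next() cleans up internally once iteration is exhausted;
 * rte_eth_iterator_cleanup() is only needed when stopping early. The
 * RTE_ETH_FOREACH_MATCHING_DEV macro in rte_ethdev.h wraps this pattern.
 */
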
uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        if (owner_id == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
                return -EINVAL;
        }

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        *owner_id = eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (new_owner == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set ethdev port %u owner from NULL owner\n",
                        port_id);
                return -EINVAL;
        }

        if (!eth_is_valid_owner_id(new_owner->id) &&
            !eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                        old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* cannot truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
                        struct rte_eth_dev_data *data =
                                rte_eth_devices[port_id].data;
                        if (data != NULL && data->owner.id == owner_id)
                                memset(&data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                }
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner ID=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        struct rte_eth_dev *ethdev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        ethdev = &rte_eth_devices[port_id];

        if (!eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (owner == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
                        port_id);
                return -EINVAL;
        }

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
        rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return 0;
}

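/*
 * Usage sketch for the ownership API above (illustrative only; "my_app",
 * port_id and do_something() are hypothetical application-side names):
 *
 *      struct rte_eth_dev_owner owner = { .name = "my_app" };
 *      uint16_t pid;
 *
 *      if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *          rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *              RTE_ETH_FOREACH_DEV_OWNED_BY(pid, owner.id)
 *                      do_something(pid);
 *              rte_eth_dev_owner_unset(port_id, owner.id);
 *      }
 *
 * where do_something() stands for the application's per-port work on the
 * ports it owns.
 */
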
int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
                        port_id);
                return -EINVAL;
        }

        /* Don't check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint16_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
                return -EINVAL;
        }

        if (port_id == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot get port ID to NULL for %s\n", name);
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}

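/*
 * A small lookup sketch for the two helpers above ("net_tap0" is only an
 * example device name):
 *
 *      uint16_t pid;
 *      char name[RTE_ETH_NAME_MAX_LEN];
 *
 *      if (rte_eth_dev_get_port_by_name("net_tap0", &pid) == 0)
 *              rte_eth_dev_get_name_by_port(pid, name);
 */
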
static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        uint16_t port_id;

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Rx queue_id=%u of device with port_id=%u\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queues[rx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        uint16_t port_id;

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Tx queue_id=%u of device with port_id=%u\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queues[tx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

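/*
 * The per-queue start/stop APIs above pair naturally with deferred-start
 * queue configuration. A minimal sketch (assumptions: dev_info was filled
 * by rte_eth_dev_info_get(), "mp" is an existing mbuf pool, and error
 * handling is elided):
 *
 *      struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *      rxconf.rx_deferred_start = 1;
 *      rte_eth_rx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, &rxconf, mp);
 *      rte_eth_dev_start(port_id);               (queue 0 stays stopped)
 *      rte_eth_dev_rx_queue_start(port_id, 0);   (start it explicitly)
 */
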
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case RTE_ETH_SPEED_NUM_10M:
                return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
        case RTE_ETH_SPEED_NUM_100M:
                return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
        case RTE_ETH_SPEED_NUM_1G:
                return RTE_ETH_LINK_SPEED_1G;
        case RTE_ETH_SPEED_NUM_2_5G:
                return RTE_ETH_LINK_SPEED_2_5G;
        case RTE_ETH_SPEED_NUM_5G:
                return RTE_ETH_LINK_SPEED_5G;
        case RTE_ETH_SPEED_NUM_10G:
                return RTE_ETH_LINK_SPEED_10G;
        case RTE_ETH_SPEED_NUM_20G:
                return RTE_ETH_LINK_SPEED_20G;
        case RTE_ETH_SPEED_NUM_25G:
                return RTE_ETH_LINK_SPEED_25G;
        case RTE_ETH_SPEED_NUM_40G:
                return RTE_ETH_LINK_SPEED_40G;
        case RTE_ETH_SPEED_NUM_50G:
                return RTE_ETH_LINK_SPEED_50G;
        case RTE_ETH_SPEED_NUM_56G:
                return RTE_ETH_LINK_SPEED_56G;
        case RTE_ETH_SPEED_NUM_100G:
                return RTE_ETH_LINK_SPEED_100G;
        case RTE_ETH_SPEED_NUM_200G:
                return RTE_ETH_LINK_SPEED_200G;
        default:
                return 0;
        }
}

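/*
 * Usage sketch for rte_eth_speed_bitflag() (illustrative): the returned
 * single-bit flag is typically OR-ed into rte_eth_conf.link_speeds along
 * with RTE_ETH_LINK_SPEED_FIXED to force a fixed speed/duplex:
 *
 *      struct rte_eth_conf conf = { 0 };
 *
 *      conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *              rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *                                    RTE_ETH_LINK_FULL_DUPLEX);
 */
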
const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
                if (offload == eth_dev_rx_offload_names[i].offload) {
                        name = eth_dev_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
                if (offload == eth_dev_tx_offload_names[i].offload) {
                        name = eth_dev_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
                if (capability == rte_eth_dev_capa_names[i].offload) {
                        name = rte_eth_dev_capa_names[i].name;
                        break;
                }
        }

        return name;
}

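/*
 * The three *_name() helpers above each map one single-bit mask to its
 * string. A sketch of dumping every advertised device capability
 * (assuming dev_info was filled by rte_eth_dev_info_get()):
 *
 *      uint64_t capa = dev_info.dev_capa;
 *
 *      while (capa != 0) {
 *              uint64_t bit = RTE_BIT64(__builtin_ctzll(capa));
 *
 *              printf("%s\n", rte_eth_dev_capability_name(bit));
 *              capa &= ~bit;
 *      }
 */
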
static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if offload couldn't be disabled. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}

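/*
 * Worked example of the XOR walk in eth_dev_validate_offloads(): with
 * req_offloads = 0x5 (bits 0 and 2) and set_offloads = 0x6 (bits 1 and 2),
 * offloads_diff = 0x3. Bit 0 is set only in req_offloads (requested but not
 * enabled, so -EINVAL); bit 1 is set only in set_offloads (enabled though
 * not requested, so only a DEBUG log); bit 2 matches in both masks and
 * never appears in the diff.
 */
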
static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
        uint32_t overhead_len;

        if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
                overhead_len = max_rx_pktlen - max_mtu;
        else
                overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        return overhead_len;
}

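/*
 * Concrete example of the computation above: a plain Ethernet device
 * reporting max_rx_pktlen = 1518 and max_mtu = 1500 yields
 * overhead_len = 1518 - 1500 = 18 bytes (14-byte header + 4-byte CRC),
 * so in eth_dev_validate_mtu() an MTU of 1500 maps back to a 1518-byte
 * frame.
 */
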
/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
                uint16_t mtu)
{
        uint32_t overhead_len;
        uint32_t frame_size;

        if (mtu < dev_info->min_mtu) {
                RTE_ETHDEV_LOG(ERR,
                        "MTU (%u) < device min MTU (%u) for port_id %u\n",
                        mtu, dev_info->min_mtu, port_id);
                return -EINVAL;
        }
        if (mtu > dev_info->max_mtu) {
                RTE_ETHDEV_LOG(ERR,
                        "MTU (%u) > device max MTU (%u) for port_id %u\n",
                        mtu, dev_info->max_mtu, port_id);
                return -EINVAL;
        }

        overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
                        dev_info->max_mtu);
        frame_size = mtu + overhead_len;
        if (frame_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR,
                        "Frame size (%u) < min frame size (%u) for port_id %u\n",
                        frame_size, RTE_ETHER_MIN_LEN, port_id);
                return -EINVAL;
        }

        if (frame_size > dev_info->max_rx_pktlen) {
                RTE_ETHDEV_LOG(ERR,
                        "Frame size (%u) > device max frame size (%u) for port_id %u\n",
                        frame_size, dev_info->max_rx_pktlen, port_id);
                return -EINVAL;
        }

        return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        int diag;
        int ret;
        uint16_t old_mtu;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (dev_conf == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot configure ethdev port %u from NULL config\n",
                        port_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /*
         * Ensure that "dev_configured" is always 0 each time we prepare to
         * do dev_configure(), to avoid any unanticipated behaviour.
         * It is set to 1 when dev_configure() executes successfully.
         */
        dev->data->dev_configured = 0;

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
         */
        if (dev_conf != &dev->data->dev_conf)
                memcpy(&dev->data->dev_conf, dev_conf,
                       sizeof(dev->data->dev_conf));

        /* Backup mtu for rollback */
        old_mtu = dev->data->mtu;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* If the number of queues specified by the application for both Rx
         * and Tx is zero, use the driver's preferred values. This cannot be
         * done individually, as it is valid for either Tx or Rx (but not
         * both) to be zero.
         * If the driver does not provide any preferred values, fall back on
         * EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of Rx queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of Tx queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Check that the numbers of Rx and Tx queues are not greater
         * than the maximum number of Rx and Tx queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }

        if (dev_conf->rxmode.mtu == 0)
                dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

        ret = eth_dev_validate_mtu(port_id, &dev_info,
                        dev->data->dev_conf.rxmode.mtu);
        if (ret != 0)
                goto rollback;

        dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

        /*
         * If LRO is enabled, check that the maximum aggregated packet
         * size is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
                uint32_t max_rx_pktlen;
                uint32_t overhead_len;

                overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
                                dev_info.max_mtu);
                max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
                if (dev_conf->rxmode.max_lro_pkt_size == 0)
                        dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
                ret = eth_dev_check_lro_pkt_size(port_id,
                                dev->data->dev_conf.rxmode.max_lro_pkt_size,
                                max_rx_pktlen,
                                dev_info.max_lro_pkt_size);
                if (ret != 0)
                        goto rollback;
        }

        /* Any requested offloading must be within its device capabilities */
        if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
             dev_conf->rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
             dev_conf->txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }

        dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
        if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
            (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
                        port_id,
                        rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Setup new number of Rx/Tx queues and reconfigure device.
         */
        diag = eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
                ret = diag;
                goto rollback;
        }

        diag = eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                eth_dev_rx_queue_config(dev, 0);
                ret = diag;
                goto rollback;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
                        port_id, diag);
                ret = eth_err(port_id, diag);
                goto reset_queues;
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_dev_profile_init(port_id, dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
                        port_id, diag);
                ret = eth_err(port_id, diag);
                goto reset_queues;
        }

        /* Validate Rx offloads. */
        diag = eth_dev_validate_offloads(port_id,
                        dev_conf->rxmode.offloads,
                        dev->data->dev_conf.rxmode.offloads, "Rx",
                        rte_eth_dev_rx_offload_name);
        if (diag != 0) {
                ret = diag;
                goto reset_queues;
        }

        /* Validate Tx offloads. */
        diag = eth_dev_validate_offloads(port_id,
                        dev_conf->txmode.offloads,
                        dev->data->dev_conf.txmode.offloads, "Tx",
                        rte_eth_dev_tx_offload_name);
        if (diag != 0) {
                ret = diag;
                goto reset_queues;
        }

        dev->data->dev_configured = 1;
        rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
        return 0;
reset_queues:
        eth_dev_rx_queue_config(dev, 0);
        eth_dev_tx_queue_config(dev, 0);
rollback:
        memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
        if (old_mtu != dev->data->mtu)
                dev->data->mtu = old_mtu;

        rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
        return ret;
}

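/*
 * A minimal configuration sketch using rte_eth_dev_configure() above:
 * one Rx and one Tx queue in default mode. Queue setup and device start
 * are separate, later steps (see the bring-up sketch further below).
 *
 *      struct rte_eth_conf conf = { 0 };
 *
 *      conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 *      if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *              rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */
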
static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
                        struct rte_eth_dev_info *dev_info)
{
        struct rte_ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;

        /* replay MAC address configuration including default MAC */
        addr = &dev->data->mac_addrs[0];
        if (*dev->dev_ops->mac_addr_set != NULL)
                (*dev->dev_ops->mac_addr_set)(dev, addr);
        else if (*dev->dev_ops->mac_addr_add != NULL)
                (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

        if (*dev->dev_ops->mac_addr_add != NULL) {
                for (i = 1; i < dev_info->max_mac_addrs; i++) {
                        addr = &dev->data->mac_addrs[i];

                        /* skip zero address */
                        if (rte_is_zero_ether_addr(addr))
                                continue;

                        pool = 0;
                        pool_mask = dev->data->mac_pool_sel[i];

                        do {
                                if (pool_mask & UINT64_C(1))
                                        (*dev->dev_ops->mac_addr_add)(dev,
                                                addr, i, pool);
                                pool_mask >>= 1;
                                pool++;
                        } while (pool_mask);
                }
        }
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
        int ret;

        if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
                eth_dev_mac_restore(dev, dev_info);

        /* replay promiscuous configuration */
        /*
         * use callbacks directly since we don't need port_id check and
         * would like to bypass the same value set
         */
        if (rte_eth_promiscuous_get(port_id) == 1 &&
            *dev->dev_ops->promiscuous_enable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->promiscuous_enable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to enable promiscuous mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        } else if (rte_eth_promiscuous_get(port_id) == 0 &&
                   *dev->dev_ops->promiscuous_disable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->promiscuous_disable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to disable promiscuous mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        }

        /* replay all multicast configuration */
        /*
         * use callbacks directly since we don't need port_id check and
         * would like to bypass the same value set
         */
        if (rte_eth_allmulticast_get(port_id) == 1 &&
            *dev->dev_ops->allmulticast_enable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->allmulticast_enable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to enable allmulticast mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        } else if (rte_eth_allmulticast_get(port_id) == 0 &&
                   *dev->dev_ops->allmulticast_disable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->allmulticast_disable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to disable allmulticast mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        }

        return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;
        int ret, ret_stop;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_configured == 0) {
                RTE_ETHDEV_LOG(INFO,
                        "Device with port_id=%"PRIu16" is not configured.\n",
                        port_id);
                return -EINVAL;
        }

        if (dev->data->dev_started != 0) {
                RTE_ETHDEV_LOG(INFO,
                        "Device with port_id=%"PRIu16" already started\n",
                        port_id);
                return 0;
        }

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        /* Let's restore MAC now if the device does not support live change */
        if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
                eth_dev_mac_restore(dev, &dev_info);

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return eth_err(port_id, diag);

        ret = eth_dev_config_restore(dev, &dev_info, port_id);
        if (ret != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Error restoring configuration for device (port %u): %s\n",
                        port_id, rte_strerror(-ret));
                ret_stop = rte_eth_dev_stop(port_id);
                if (ret_stop != 0) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to stop device (port %u): %s\n",
                                port_id, rte_strerror(-ret_stop));
                }

                return ret;
        }

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }

        /* expose selection of PMD fast-path functions */
        eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

        rte_ethdev_trace_start(port_id);
        return 0;
}

1507int
1508rte_eth_dev_stop(uint16_t port_id)
1509{
1510        struct rte_eth_dev *dev;
1511        int ret;
1512
1513        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1514        dev = &rte_eth_devices[port_id];
1515
1516        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1517
1518        if (dev->data->dev_started == 0) {
1519                RTE_ETHDEV_LOG(INFO,
1520                        "Device with port_id=%"PRIu16" already stopped\n",
1521                        port_id);
1522                return 0;
1523        }
1524
1525        /* point fast-path functions to dummy ones */
1526        eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);
1527
1528        ret = (*dev->dev_ops->dev_stop)(dev);
1529        if (ret == 0)
1530                dev->data->dev_started = 0;
1531        rte_ethdev_trace_stop(port_id, ret);
1532
1533        return ret;
1534}
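
/*
 * Usage sketch (illustrative, not part of this file): the start/stop cycle
 * as seen from an application. Assumes a hypothetical port that was already
 * configured with rte_eth_dev_configure() and had its queues set up, since
 * rte_eth_dev_start() rejects unconfigured devices with -EINVAL.
 */
#if 0
static int
example_port_cycle(uint16_t port_id)
{
        int ret;

        ret = rte_eth_dev_start(port_id);       /* replays MAC/promisc/mcast state */
        if (ret != 0)
                return ret;

        /* ... rte_eth_rx_burst()/rte_eth_tx_burst() on the fast path ... */

        /* Fast-path ops are reset to dummies before the PMD stop callback. */
        return rte_eth_dev_stop(port_id);
}
#endif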
1535
1536int
1537rte_eth_dev_set_link_up(uint16_t port_id)
1538{
1539        struct rte_eth_dev *dev;
1540
1541        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1542        dev = &rte_eth_devices[port_id];
1543
1544        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1545        return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1546}
1547
1548int
1549rte_eth_dev_set_link_down(uint16_t port_id)
1550{
1551        struct rte_eth_dev *dev;
1552
1553        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1554        dev = &rte_eth_devices[port_id];
1555
1556        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1557        return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1558}
1559
1560int
1561rte_eth_dev_close(uint16_t port_id)
1562{
1563        struct rte_eth_dev *dev;
1564        int firsterr, binerr;
1565        int *lasterr = &firsterr;
1566
1567        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1568        dev = &rte_eth_devices[port_id];
1569
1570        /*
1571         * Secondary process needs to close device to release process private
1572         * resources. But secondary process should not be obliged to wait
1573         * for device stop before closing ethdev.
1574         */
1575        if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
1576                        dev->data->dev_started) {
1577                RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
1578                               port_id);
1579                return -EINVAL;
1580        }
1581
1582        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1583        *lasterr = (*dev->dev_ops->dev_close)(dev);
1584        if (*lasterr != 0)
1585                lasterr = &binerr;
1586
1587        rte_ethdev_trace_close(port_id);
1588        *lasterr = rte_eth_dev_release_port(dev);
1589
1590        return firsterr;
1591}
1592
1593int
1594rte_eth_dev_reset(uint16_t port_id)
1595{
1596        struct rte_eth_dev *dev;
1597        int ret;
1598
1599        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1600        dev = &rte_eth_devices[port_id];
1601
1602        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1603
1604        ret = rte_eth_dev_stop(port_id);
1605        if (ret != 0) {
1606                RTE_ETHDEV_LOG(ERR,
1607                        "Failed to stop device (port %u) before reset: %s - ignoring\n",
1608                        port_id, rte_strerror(-ret));
1609        }
1610        ret = dev->dev_ops->dev_reset(dev);
1611
1612        return eth_err(port_id, ret);
1613}
1614
1615int
1616rte_eth_dev_is_removed(uint16_t port_id)
1617{
1618        struct rte_eth_dev *dev;
1619        int ret;
1620
1621        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1622        dev = &rte_eth_devices[port_id];
1623
1624        if (dev->state == RTE_ETH_DEV_REMOVED)
1625                return 1;
1626
1627        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1628
1629        ret = dev->dev_ops->is_removed(dev);
1630        if (ret != 0)
1631                /* Device is physically removed. */
1632                dev->state = RTE_ETH_DEV_REMOVED;
1633
1634        return ret;
1635}
1636
1637static int
1638rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1639                             uint16_t n_seg, uint32_t *mbp_buf_size,
1640                             const struct rte_eth_dev_info *dev_info)
1641{
1642        const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1643        struct rte_mempool *mp_first;
1644        uint32_t offset_mask;
1645        uint16_t seg_idx;
1646
1647        if (n_seg > seg_capa->max_nseg) {
1648                RTE_ETHDEV_LOG(ERR,
1649                               "Requested Rx segments %u exceed supported %u\n",
1650                               n_seg, seg_capa->max_nseg);
1651                return -EINVAL;
1652        }
1653        /*
1654         * Check the sizes and offsets against buffer sizes
1655         * for each segment specified in extended configuration.
1656         */
1657        mp_first = rx_seg[0].mp;
1658        offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
1659        for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1660                struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1661                uint32_t length = rx_seg[seg_idx].length;
1662                uint32_t offset = rx_seg[seg_idx].offset;
1663
1664                if (mpl == NULL) {
1665                        RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1666                        return -EINVAL;
1667                }
1668                if (seg_idx != 0 && mp_first != mpl &&
1669                    seg_capa->multi_pools == 0) {
1670                        RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1671                        return -ENOTSUP;
1672                }
1673                if (offset != 0) {
1674                        if (seg_capa->offset_allowed == 0) {
1675                                RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1676                                return -ENOTSUP;
1677                        }
1678                        if (offset & offset_mask) {
1679                                RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
1680                                               offset,
1681                                               seg_capa->offset_align_log2);
1682                                return -EINVAL;
1683                        }
1684                }
1685                if (mpl->private_data_size <
1686                        sizeof(struct rte_pktmbuf_pool_private)) {
1687                        RTE_ETHDEV_LOG(ERR,
1688                                       "%s private_data_size %u < %u\n",
1689                                       mpl->name, mpl->private_data_size,
1690                                       (unsigned int)sizeof
1691                                        (struct rte_pktmbuf_pool_private));
1692                        return -ENOSPC;
1693                }
1694                offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1695                *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1696                length = length != 0 ? length : *mbp_buf_size;
1697                if (*mbp_buf_size < length + offset) {
1698                        RTE_ETHDEV_LOG(ERR,
1699                                       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1700                                       mpl->name, *mbp_buf_size,
1701                                       length + offset, length, offset);
1702                        return -EINVAL;
1703                }
1704        }
1705        return 0;
1706}
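
/*
 * Usage sketch (illustrative, not part of this file): a two-segment
 * buffer-split Rx configuration of the kind validated above. The pool
 * pointers and the 128-byte header length are hypothetical; both pools
 * need rte_pktmbuf_pool_private data, and the PMD must advertise
 * RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT.
 */
#if 0
static int
example_rxq_split_setup(uint16_t port_id, uint16_t queue_id,
                        uint16_t nb_desc, struct rte_mempool *hdr_pool,
                        struct rte_mempool *pay_pool)
{
        union rte_eth_rxseg rx_seg[2] = {
                { .split = { .mp = hdr_pool, .length = 128, .offset = 0 } },
                { .split = { .mp = pay_pool, .length = 0 /* rest of buffer */ } },
        };
        struct rte_eth_rxconf rxconf = {
                .offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT,
                .rx_seg = rx_seg,
                .rx_nseg = 2,
        };

        /* mp == NULL selects the extended multi-segment path below. */
        return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
                                      rte_eth_dev_socket_id(port_id),
                                      &rxconf, NULL);
}
#endif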
1707
1708int
1709rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1710                       uint16_t nb_rx_desc, unsigned int socket_id,
1711                       const struct rte_eth_rxconf *rx_conf,
1712                       struct rte_mempool *mp)
1713{
1714        int ret;
1715        uint32_t mbp_buf_size;
1716        struct rte_eth_dev *dev;
1717        struct rte_eth_dev_info dev_info;
1718        struct rte_eth_rxconf local_conf;
1719
1720        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1721        dev = &rte_eth_devices[port_id];
1722
1723        if (rx_queue_id >= dev->data->nb_rx_queues) {
1724                RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1725                return -EINVAL;
1726        }
1727
1728        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1729
1730        ret = rte_eth_dev_info_get(port_id, &dev_info);
1731        if (ret != 0)
1732                return ret;
1733
1734        if (mp != NULL) {
1735                /* Single pool configuration check. */
1736                if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1737                        RTE_ETHDEV_LOG(ERR,
1738                                       "Ambiguous segment configuration\n");
1739                        return -EINVAL;
1740                }
1741                /*
1742                 * Check the size of the mbuf data buffer, this value
1743                 * must be provided in the private data of the memory pool.
1744                 * First check that the memory pool(s) has a valid private data.
1745                 */
1746                if (mp->private_data_size <
1747                                sizeof(struct rte_pktmbuf_pool_private)) {
1748                        RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1749                                mp->name, mp->private_data_size,
1750                                (unsigned int)
1751                                sizeof(struct rte_pktmbuf_pool_private));
1752                        return -ENOSPC;
1753                }
1754                mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1755                if (mbp_buf_size < dev_info.min_rx_bufsize +
1756                                   RTE_PKTMBUF_HEADROOM) {
1757                        RTE_ETHDEV_LOG(ERR,
1758                                       "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1759                                       mp->name, mbp_buf_size,
1760                                       RTE_PKTMBUF_HEADROOM +
1761                                       dev_info.min_rx_bufsize,
1762                                       RTE_PKTMBUF_HEADROOM,
1763                                       dev_info.min_rx_bufsize);
1764                        return -EINVAL;
1765                }
1766        } else {
1767                const struct rte_eth_rxseg_split *rx_seg;
1768                uint16_t n_seg;
1769
1770                /* Extended multi-segment configuration check. */
1771                if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
1772                        RTE_ETHDEV_LOG(ERR,
1773                                       "Memory pool is null and no extended configuration provided\n");
1774                        return -EINVAL;
1775                }
1776
1777                rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
1778                n_seg = rx_conf->rx_nseg;
1779
1780                if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1781                        ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
1782                                                           &mbp_buf_size,
1783                                                           &dev_info);
1784                        if (ret != 0)
1785                                return ret;
1786                } else {
1787                        RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
1788                        return -EINVAL;
1789                }
1790        }
1791
1792        /* Use default specified by driver, if nb_rx_desc is zero */
1793        if (nb_rx_desc == 0) {
1794                nb_rx_desc = dev_info.default_rxportconf.ring_size;
1795                /* If driver default is also zero, fall back on EAL default */
1796                if (nb_rx_desc == 0)
1797                        nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1798        }
1799
1800        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1801                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1802                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1804                RTE_ETHDEV_LOG(ERR,
1805                        "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1806                        nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1807                        dev_info.rx_desc_lim.nb_min,
1808                        dev_info.rx_desc_lim.nb_align);
1809                return -EINVAL;
1810        }
1811
1812        if (dev->data->dev_started &&
1813                !(dev_info.dev_capa &
1814                        RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1815                return -EBUSY;
1816
1817        if (dev->data->dev_started &&
1818                (dev->data->rx_queue_state[rx_queue_id] !=
1819                        RTE_ETH_QUEUE_STATE_STOPPED))
1820                return -EBUSY;
1821
1822        eth_dev_rxq_release(dev, rx_queue_id);
1823
1824        if (rx_conf == NULL)
1825                rx_conf = &dev_info.default_rxconf;
1826
1827        local_conf = *rx_conf;
1828
1829        /*
1830         * If an offloading has already been enabled in
1831         * rte_eth_dev_configure(), it has been enabled on all queues,
1832         * so there is no need to enable it in this queue again.
1833         * The local_conf.offloads input to underlying PMD only carries
1834         * those offloadings which are only enabled on this queue and
1835         * not enabled on all queues.
1836         */
1837        local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1838
1839        /*
1840         * New added offloadings for this queue are those not enabled in
1841         * rte_eth_dev_configure() and they must be per-queue type.
1842         * A pure per-port offloading can't be enabled on a queue while
1843         * disabled on another queue. A pure per-port offloading can't
1844         * be enabled for any queue as new added one if it hasn't been
1845         * enabled in rte_eth_dev_configure().
1846         */
1847        if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1848             local_conf.offloads) {
1849                RTE_ETHDEV_LOG(ERR,
1850                        "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1851                        "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1852                        port_id, rx_queue_id, local_conf.offloads,
1853                        dev_info.rx_queue_offload_capa,
1854                        __func__);
1855                return -EINVAL;
1856        }
1857
1858        if (local_conf.share_group > 0 &&
1859            (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
1860                RTE_ETHDEV_LOG(ERR,
1861                        "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
1862                        port_id, rx_queue_id, local_conf.share_group);
1863                return -EINVAL;
1864        }
1865
1866        /*
1867         * If LRO is enabled, check that the maximum aggregated packet
1868         * size is supported by the configured device.
1869         */
1870        /* Get the real Ethernet overhead length */
1871        if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1872                uint32_t overhead_len;
1873                uint32_t max_rx_pktlen;
1875
1876                overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1877                                dev_info.max_mtu);
1878                max_rx_pktlen = dev->data->mtu + overhead_len;
1879                if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1880                        dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1881                ret = eth_dev_check_lro_pkt_size(port_id,
1882                                dev->data->dev_conf.rxmode.max_lro_pkt_size,
1883                                max_rx_pktlen,
1884                                dev_info.max_lro_pkt_size);
1885                if (ret != 0)
1886                        return ret;
1887        }
1888
1889        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1890                                              socket_id, &local_conf, mp);
1891        if (!ret) {
1892                if (!dev->data->min_rx_buf_size ||
1893                    dev->data->min_rx_buf_size > mbp_buf_size)
1894                        dev->data->min_rx_buf_size = mbp_buf_size;
1895        }
1896
1897        rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1898                rx_conf, ret);
1899        return eth_err(port_id, ret);
1900}
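
/*
 * Usage sketch (illustrative, not part of this file): the common
 * single-mempool Rx queue setup. rte_pktmbuf_pool_create() installs the
 * rte_pktmbuf_pool_private area the checks above depend on; the pool name
 * and sizes are hypothetical.
 */
#if 0
static int
example_rxq_setup(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mempool *mp;

        mp = rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
                                     RTE_MBUF_DEFAULT_BUF_SIZE,
                                     rte_eth_dev_socket_id(port_id));
        if (mp == NULL)
                return -rte_errno;

        /* nb_rx_desc == 0 and rx_conf == NULL select the driver defaults. */
        return rte_eth_rx_queue_setup(port_id, queue_id, 0,
                                      rte_eth_dev_socket_id(port_id),
                                      NULL, mp);
}
#endif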
1901
1902int
1903rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1904                               uint16_t nb_rx_desc,
1905                               const struct rte_eth_hairpin_conf *conf)
1906{
1907        int ret;
1908        struct rte_eth_dev *dev;
1909        struct rte_eth_hairpin_cap cap;
1910        int i;
1911        int count;
1912
1913        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1914        dev = &rte_eth_devices[port_id];
1915
1916        if (rx_queue_id >= dev->data->nb_rx_queues) {
1917                RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1918                return -EINVAL;
1919        }
1920
1921        if (conf == NULL) {
1922                RTE_ETHDEV_LOG(ERR,
1923                        "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
1924                        port_id);
1925                return -EINVAL;
1926        }
1927
1928        ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1929        if (ret != 0)
1930                return ret;
1931        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1932                                -ENOTSUP);
1933        /* if nb_rx_desc is zero use max number of desc from the driver. */
1934        if (nb_rx_desc == 0)
1935                nb_rx_desc = cap.max_nb_desc;
1936        if (nb_rx_desc > cap.max_nb_desc) {
1937                RTE_ETHDEV_LOG(ERR,
1938                        "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1939                        nb_rx_desc, cap.max_nb_desc);
1940                return -EINVAL;
1941        }
1942        if (conf->peer_count > cap.max_rx_2_tx) {
1943                RTE_ETHDEV_LOG(ERR,
1944                        "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
1945                        conf->peer_count, cap.max_rx_2_tx);
1946                return -EINVAL;
1947        }
1948        if (conf->peer_count == 0) {
1949                RTE_ETHDEV_LOG(ERR,
1950                        "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
1951                        conf->peer_count);
1952                return -EINVAL;
1953        }
1954        for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1955             cap.max_nb_queues != UINT16_MAX; i++) {
1956                if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1957                        count++;
1958        }
1959        if (count > cap.max_nb_queues) {
1960                RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1961                               cap.max_nb_queues);
1962                return -EINVAL;
1963        }
1964        if (dev->data->dev_started)
1965                return -EBUSY;
1966        eth_dev_rxq_release(dev, rx_queue_id);
1967        ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1968                                                      nb_rx_desc, conf);
1969        if (ret == 0)
1970                dev->data->rx_queue_state[rx_queue_id] =
1971                        RTE_ETH_QUEUE_STATE_HAIRPIN;
1972        return eth_err(port_id, ret);
1973}
1974
1975int
1976rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1977                       uint16_t nb_tx_desc, unsigned int socket_id,
1978                       const struct rte_eth_txconf *tx_conf)
1979{
1980        struct rte_eth_dev *dev;
1981        struct rte_eth_dev_info dev_info;
1982        struct rte_eth_txconf local_conf;
1983        int ret;
1984
1985        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1986        dev = &rte_eth_devices[port_id];
1987
1988        if (tx_queue_id >= dev->data->nb_tx_queues) {
1989                RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
1990                return -EINVAL;
1991        }
1992
1993        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1994
1995        ret = rte_eth_dev_info_get(port_id, &dev_info);
1996        if (ret != 0)
1997                return ret;
1998
1999        /* Use default specified by driver, if nb_tx_desc is zero */
2000        if (nb_tx_desc == 0) {
2001                nb_tx_desc = dev_info.default_txportconf.ring_size;
2002                /* If driver default is zero, fall back on EAL default */
2003                if (nb_tx_desc == 0)
2004                        nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2005        }
2006        if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2007            nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2008            nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2009                RTE_ETHDEV_LOG(ERR,
2010                        "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2011                        nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2012                        dev_info.tx_desc_lim.nb_min,
2013                        dev_info.tx_desc_lim.nb_align);
2014                return -EINVAL;
2015        }
2016
2017        if (dev->data->dev_started &&
2018                !(dev_info.dev_capa &
2019                        RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2020                return -EBUSY;
2021
2022        if (dev->data->dev_started &&
2023                (dev->data->tx_queue_state[tx_queue_id] !=
2024                        RTE_ETH_QUEUE_STATE_STOPPED))
2025                return -EBUSY;
2026
2027        eth_dev_txq_release(dev, tx_queue_id);
2028
2029        if (tx_conf == NULL)
2030                tx_conf = &dev_info.default_txconf;
2031
2032        local_conf = *tx_conf;
2033
2034        /*
2035         * If an offloading has already been enabled in
2036         * rte_eth_dev_configure(), it has been enabled on all queues,
2037         * so there is no need to enable it in this queue again.
2038         * The local_conf.offloads input to underlying PMD only carries
2039         * those offloadings which are only enabled on this queue and
2040         * not enabled on all queues.
2041         */
2042        local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2043
2044        /*
2045         * New added offloadings for this queue are those not enabled in
2046         * rte_eth_dev_configure() and they must be per-queue type.
2047         * A pure per-port offloading can't be enabled on a queue while
2048         * disabled on another queue. A pure per-port offloading can't
2049         * be enabled for any queue as new added one if it hasn't been
2050         * enabled in rte_eth_dev_configure().
2051         */
2052        if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2053             local_conf.offloads) {
2054                RTE_ETHDEV_LOG(ERR,
2055                        "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2056                        "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2057                        port_id, tx_queue_id, local_conf.offloads,
2058                        dev_info.tx_queue_offload_capa,
2059                        __func__);
2060                return -EINVAL;
2061        }
2062
2063        rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2064        return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2065                       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2066}
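
/*
 * Usage sketch (illustrative, not part of this file): Tx queue setup adding
 * one per-queue offload on top of the port-wide configuration. The chosen
 * offload is hypothetical and must appear in tx_queue_offload_capa, as
 * enforced above.
 */
#if 0
static int
example_txq_setup(uint16_t port_id, uint16_t queue_id,
                  const struct rte_eth_dev_info *dev_info)
{
        struct rte_eth_txconf txconf = dev_info->default_txconf;

        txconf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

        return rte_eth_tx_queue_setup(port_id, queue_id, 0,
                                      rte_eth_dev_socket_id(port_id),
                                      &txconf);
}
#endif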
2067
2068int
2069rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2070                               uint16_t nb_tx_desc,
2071                               const struct rte_eth_hairpin_conf *conf)
2072{
2073        struct rte_eth_dev *dev;
2074        struct rte_eth_hairpin_cap cap;
2075        int i;
2076        int count;
2077        int ret;
2078
2079        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2080        dev = &rte_eth_devices[port_id];
2081
2082        if (tx_queue_id >= dev->data->nb_tx_queues) {
2083                RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2084                return -EINVAL;
2085        }
2086
2087        if (conf == NULL) {
2088                RTE_ETHDEV_LOG(ERR,
2089                        "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2090                        port_id);
2091                return -EINVAL;
2092        }
2093
2094        ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2095        if (ret != 0)
2096                return ret;
2097        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2098                                -ENOTSUP);
2099        /* if nb_tx_desc is zero use max number of desc from the driver. */
2100        if (nb_tx_desc == 0)
2101                nb_tx_desc = cap.max_nb_desc;
2102        if (nb_tx_desc > cap.max_nb_desc) {
2103                RTE_ETHDEV_LOG(ERR,
2104                        "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2105                        nb_tx_desc, cap.max_nb_desc);
2106                return -EINVAL;
2107        }
2108        if (conf->peer_count > cap.max_tx_2_rx) {
2109                RTE_ETHDEV_LOG(ERR,
2110                        "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2111                        conf->peer_count, cap.max_tx_2_rx);
2112                return -EINVAL;
2113        }
2114        if (conf->peer_count == 0) {
2115                RTE_ETHDEV_LOG(ERR,
2116                        "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2117                        conf->peer_count);
2118                return -EINVAL;
2119        }
2120        for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2121             cap.max_nb_queues != UINT16_MAX; i++) {
2122                if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2123                        count++;
2124        }
2125        if (count > cap.max_nb_queues) {
2126                RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2127                               cap.max_nb_queues);
2128                return -EINVAL;
2129        }
2130        if (dev->data->dev_started)
2131                return -EBUSY;
2132        eth_dev_txq_release(dev, tx_queue_id);
2133        ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2134                (dev, tx_queue_id, nb_tx_desc, conf);
2135        if (ret == 0)
2136                dev->data->tx_queue_state[tx_queue_id] =
2137                        RTE_ETH_QUEUE_STATE_HAIRPIN;
2138        return eth_err(port_id, ret);
2139}
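
/*
 * Usage sketch (illustrative, not part of this file): a single-port hairpin
 * pair looping Rx queue "rxq" back to Tx queue "txq". peer_count == 1
 * satisfies the checks above; the port must be configured but not yet
 * started, and implicit (non-manual) binding is assumed.
 */
#if 0
static int
example_hairpin_pair(uint16_t port_id, uint16_t rxq, uint16_t txq)
{
        struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
        int ret;

        conf.peers[0].port = port_id;
        conf.peers[0].queue = txq;
        ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq, 0, &conf);
        if (ret != 0)
                return ret;

        conf.peers[0].queue = rxq;
        return rte_eth_tx_hairpin_queue_setup(port_id, txq, 0, &conf);
}
#endif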
2140
2141int
2142rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2143{
2144        struct rte_eth_dev *dev;
2145        int ret;
2146
2147        RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2148        dev = &rte_eth_devices[tx_port];
2149
2150        if (dev->data->dev_started == 0) {
2151                RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2152                return -EBUSY;
2153        }
2154
2155        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2156        ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2157        if (ret != 0)
2158                RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2159                               " to Rx %d (%d - all ports)\n",
2160                               tx_port, rx_port, RTE_MAX_ETHPORTS);
2161
2162        return ret;
2163}
2164
2165int
2166rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2167{
2168        struct rte_eth_dev *dev;
2169        int ret;
2170
2171        RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2172        dev = &rte_eth_devices[tx_port];
2173
2174        if (dev->data->dev_started == 0) {
2175                RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2176                return -EBUSY;
2177        }
2178
2179        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2180        ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2181        if (ret != 0)
2182                RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2183                               " from Rx %d (%d - all ports)\n",
2184                               tx_port, rx_port, RTE_MAX_ETHPORTS);
2185
2186        return ret;
2187}
2188
2189int
2190rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2191                               size_t len, uint32_t direction)
2192{
2193        struct rte_eth_dev *dev;
2194        int ret;
2195
2196        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2197        dev = &rte_eth_devices[port_id];
2198
2199        if (peer_ports == NULL) {
2200                RTE_ETHDEV_LOG(ERR,
2201                        "Cannot get ethdev port %u hairpin peer ports to NULL\n",
2202                        port_id);
2203                return -EINVAL;
2204        }
2205
2206        if (len == 0) {
2207                RTE_ETHDEV_LOG(ERR,
2208                        "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2209                        port_id);
2210                return -EINVAL;
2211        }
2212
2213        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2214                                -ENOTSUP);
2215
2216        ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2217                                                      len, direction);
2218        if (ret < 0)
2219                RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2220                               port_id, direction ? "Rx" : "Tx");
2221
2222        return ret;
2223}
2224
2225void
2226rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2227                void *userdata __rte_unused)
2228{
2229        rte_pktmbuf_free_bulk(pkts, unsent);
2230}
2231
2232void
2233rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2234                void *userdata)
2235{
2236        uint64_t *count = userdata;
2237
2238        rte_pktmbuf_free_bulk(pkts, unsent);
2239        *count += unsent;
2240}
2241
2242int
2243rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2244                buffer_tx_error_fn cbfn, void *userdata)
2245{
2246        if (buffer == NULL) {
2247                RTE_ETHDEV_LOG(ERR,
2248                        "Cannot set Tx buffer error callback to NULL buffer\n");
2249                return -EINVAL;
2250        }
2251
2252        buffer->error_callback = cbfn;
2253        buffer->error_userdata = userdata;
2254        return 0;
2255}
2256
2257int
2258rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2259{
2260        int ret = 0;
2261
2262        if (buffer == NULL) {
2263                RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2264                return -EINVAL;
2265        }
2266
2267        buffer->size = size;
2268        if (buffer->error_callback == NULL) {
2269                ret = rte_eth_tx_buffer_set_err_callback(
2270                        buffer, rte_eth_tx_buffer_drop_callback, NULL);
2271        }
2272
2273        return ret;
2274}
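
/*
 * Usage sketch (illustrative, not part of this file): allocating and arming
 * a Tx buffer with the counting error callback defined above. The caller's
 * drop counter must outlive the buffer; RTE_ETH_TX_BUFFER_SIZE() accounts
 * for the flexible pkts[] array.
 */
#if 0
static struct rte_eth_dev_tx_buffer *
example_tx_buffer_create(int socket_id, uint64_t *drop_counter)
{
        struct rte_eth_dev_tx_buffer *buffer;

        buffer = rte_zmalloc_socket("example_tx_buffer",
                                    RTE_ETH_TX_BUFFER_SIZE(32), 0, socket_id);
        if (buffer == NULL)
                return NULL;

        if (rte_eth_tx_buffer_init(buffer, 32) != 0 ||
            rte_eth_tx_buffer_set_err_callback(buffer,
                        rte_eth_tx_buffer_count_callback, drop_counter) != 0) {
                rte_free(buffer);
                return NULL;
        }
        /* Datapath then uses rte_eth_tx_buffer()/rte_eth_tx_buffer_flush(). */
        return buffer;
}
#endif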
2275
2276int
2277rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2278{
2279        struct rte_eth_dev *dev;
2280        int ret;
2281
2282        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2283        dev = &rte_eth_devices[port_id];
2284
2285        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2286
2287        /* Call driver to free pending mbufs. */
2288        ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2289                                               free_cnt);
2290        return eth_err(port_id, ret);
2291}
2292
2293int
2294rte_eth_promiscuous_enable(uint16_t port_id)
2295{
2296        struct rte_eth_dev *dev;
2297        int diag = 0;
2298
2299        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2300        dev = &rte_eth_devices[port_id];
2301
2302        if (dev->data->promiscuous == 1)
2303                return 0;
2304
2305        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2306
2307        diag = (*dev->dev_ops->promiscuous_enable)(dev);
2308        dev->data->promiscuous = (diag == 0) ? 1 : 0;
2309
2310        return eth_err(port_id, diag);
2311}
2312
2313int
2314rte_eth_promiscuous_disable(uint16_t port_id)
2315{
2316        struct rte_eth_dev *dev;
2317        int diag = 0;
2318
2319        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2320        dev = &rte_eth_devices[port_id];
2321
2322        if (dev->data->promiscuous == 0)
2323                return 0;
2324
2325        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2326
2327        dev->data->promiscuous = 0;
2328        diag = (*dev->dev_ops->promiscuous_disable)(dev);
2329        if (diag != 0)
2330                dev->data->promiscuous = 1;
2331
2332        return eth_err(port_id, diag);
2333}
2334
2335int
2336rte_eth_promiscuous_get(uint16_t port_id)
2337{
2338        struct rte_eth_dev *dev;
2339
2340        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2341        dev = &rte_eth_devices[port_id];
2342
2343        return dev->data->promiscuous;
2344}
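
/*
 * Usage sketch (illustrative, not part of this file): tolerating PMDs that
 * do not implement promiscuous mode. Treating -ENOTSUP as non-fatal mirrors
 * what eth_dev_config_restore() does when replaying this setting on start.
 */
#if 0
static int
example_enable_promisc(uint16_t port_id)
{
        int ret = rte_eth_promiscuous_enable(port_id);

        if (ret != 0 && ret != -ENOTSUP)
                return ret;     /* real failure */
        return 0;               /* enabled, or not supported by this PMD */
}
#endif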
2345
2346int
2347rte_eth_allmulticast_enable(uint16_t port_id)
2348{
2349        struct rte_eth_dev *dev;
2350        int diag;
2351
2352        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2353        dev = &rte_eth_devices[port_id];
2354
2355        if (dev->data->all_multicast == 1)
2356                return 0;
2357
2358        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2359        diag = (*dev->dev_ops->allmulticast_enable)(dev);
2360        dev->data->all_multicast = (diag == 0) ? 1 : 0;
2361
2362        return eth_err(port_id, diag);
2363}
2364
2365int
2366rte_eth_allmulticast_disable(uint16_t port_id)
2367{
2368        struct rte_eth_dev *dev;
2369        int diag;
2370
2371        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2372        dev = &rte_eth_devices[port_id];
2373
2374        if (dev->data->all_multicast == 0)
2375                return 0;
2376
2377        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2378        dev->data->all_multicast = 0;
2379        diag = (*dev->dev_ops->allmulticast_disable)(dev);
2380        if (diag != 0)
2381                dev->data->all_multicast = 1;
2382
2383        return eth_err(port_id, diag);
2384}
2385
2386int
2387rte_eth_allmulticast_get(uint16_t port_id)
2388{
2389        struct rte_eth_dev *dev;
2390
2391        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2392        dev = &rte_eth_devices[port_id];
2393
2394        return dev->data->all_multicast;
2395}
2396
2397int
2398rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2399{
2400        struct rte_eth_dev *dev;
2401
2402        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2403        dev = &rte_eth_devices[port_id];
2404
2405        if (eth_link == NULL) {
2406                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2407                        port_id);
2408                return -EINVAL;
2409        }
2410
2411        if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2412                rte_eth_linkstatus_get(dev, eth_link);
2413        else {
2414                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2415                (*dev->dev_ops->link_update)(dev, 1);
2416                *eth_link = dev->data->dev_link;
2417        }
2418
2419        return 0;
2420}
2421
2422int
2423rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2424{
2425        struct rte_eth_dev *dev;
2426
2427        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2428        dev = &rte_eth_devices[port_id];
2429
2430        if (eth_link == NULL) {
2431                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2432                        port_id);
2433                return -EINVAL;
2434        }
2435
2436        if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2437                rte_eth_linkstatus_get(dev, eth_link);
2438        else {
2439                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2440                (*dev->dev_ops->link_update)(dev, 0);
2441                *eth_link = dev->data->dev_link;
2442        }
2443
2444        return 0;
2445}
2446
2447const char *
2448rte_eth_link_speed_to_str(uint32_t link_speed)
2449{
2450        switch (link_speed) {
2451        case RTE_ETH_SPEED_NUM_NONE: return "None";
2452        case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
2453        case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
2454        case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
2455        case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2456        case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
2457        case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
2458        case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
2459        case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
2460        case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
2461        case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
2462        case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
2463        case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
2464        case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
2465        case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2466        default: return "Invalid";
2467        }
2468}
2469
2470int
2471rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2472{
2473        if (str == NULL) {
2474                RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2475                return -EINVAL;
2476        }
2477
2478        if (len == 0) {
2479                RTE_ETHDEV_LOG(ERR,
2480                        "Cannot convert link to string with zero size\n");
2481                return -EINVAL;
2482        }
2483
2484        if (eth_link == NULL) {
2485                RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2486                return -EINVAL;
2487        }
2488
2489        if (eth_link->link_status == RTE_ETH_LINK_DOWN)
2490                return snprintf(str, len, "Link down");
2491        else
2492                return snprintf(str, len, "Link up at %s %s %s",
2493                        rte_eth_link_speed_to_str(eth_link->link_speed),
2494                        (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
2495                        "FDX" : "HDX",
2496                        (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
2497                        "Autoneg" : "Fixed");
2498}
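
/*
 * Usage sketch (illustrative, not part of this file): polling the link
 * without waiting and formatting it with the helpers above.
 * RTE_ETH_LINK_MAX_STR_LEN is sized for the longest possible string.
 */
#if 0
static void
example_print_link(uint16_t port_id)
{
        struct rte_eth_link link;
        char buf[RTE_ETH_LINK_MAX_STR_LEN];

        if (rte_eth_link_get_nowait(port_id, &link) == 0) {
                rte_eth_link_to_str(buf, sizeof(buf), &link);
                printf("Port %u: %s\n", port_id, buf);
        }
}
#endif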
2499
2500int
2501rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2502{
2503        struct rte_eth_dev *dev;
2504
2505        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2506        dev = &rte_eth_devices[port_id];
2507
2508        if (stats == NULL) {
2509                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2510                        port_id);
2511                return -EINVAL;
2512        }
2513
2514        memset(stats, 0, sizeof(*stats));
2515
2516        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2517        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2518        return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2519}
2520
2521int
2522rte_eth_stats_reset(uint16_t port_id)
2523{
2524        struct rte_eth_dev *dev;
2525        int ret;
2526
2527        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2528        dev = &rte_eth_devices[port_id];
2529
2530        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2531        ret = (*dev->dev_ops->stats_reset)(dev);
2532        if (ret != 0)
2533                return eth_err(port_id, ret);
2534
2535        dev->data->rx_mbuf_alloc_failed = 0;
2536
2537        return 0;
2538}
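
/*
 * Usage sketch (illustrative, not part of this file): sampling the basic
 * stats and clearing them, e.g. for a per-interval rate display. Note that
 * rx_nombuf is filled from ethdev data before the PMD callback runs.
 */
#if 0
static void
example_sample_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) == 0) {
                printf("port %u: %" PRIu64 " rx, %" PRIu64 " tx, %" PRIu64 " missed\n",
                       port_id, stats.ipackets, stats.opackets, stats.imissed);
                rte_eth_stats_reset(port_id);
        }
}
#endif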
2539
2540static inline int
2541eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2542{
2543        uint16_t nb_rxqs, nb_txqs;
2544        int count;
2545
2546        nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2547        nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2548
2549        count = RTE_NB_STATS;
2550        if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2551                count += nb_rxqs * RTE_NB_RXQ_STATS;
2552                count += nb_txqs * RTE_NB_TXQ_STATS;
2553        }
2554
2555        return count;
2556}
2557
2558static int
2559eth_dev_get_xstats_count(uint16_t port_id)
2560{
2561        struct rte_eth_dev *dev;
2562        int count;
2563
2564        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2565        dev = &rte_eth_devices[port_id];
2566        if (dev->dev_ops->xstats_get_names != NULL) {
2567                count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2568                if (count < 0)
2569                        return eth_err(port_id, count);
2570        } else
2571                count = 0;
2572
2574        count += eth_dev_get_xstats_basic_count(dev);
2575
2576        return count;
2577}
2578
2579int
2580rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2581                uint64_t *id)
2582{
2583        int cnt_xstats, idx_xstat;
2584
2585        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2586
2587        if (xstat_name == NULL) {
2588                RTE_ETHDEV_LOG(ERR,
2589                        "Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2590                        port_id);
2591                return -ENOMEM;
2592        }
2593
2594        if (id == NULL) {
2595                RTE_ETHDEV_LOG(ERR,
2596                        "Cannot get ethdev port %u xstats ID to NULL\n",
2597                        port_id);
2598                return -ENOMEM;
2599        }
2600
2601        /* Get count */
2602        cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2603        if (cnt_xstats < 0) {
2604                RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2605                return -ENODEV;
2606        }
2607
2608        /* Get id-name lookup table */
2609        struct rte_eth_xstat_name xstats_names[cnt_xstats];
2610
2611        if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2612                        port_id, xstats_names, cnt_xstats, NULL)) {
2613                RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2614                return -1;
2615        }
2616
2617        for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2618                if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2619                        *id = idx_xstat;
2620                        return 0;
2621                }
2622        }
2623
2624        return -EINVAL;
2625}
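
/*
 * Usage sketch (illustrative, not part of this file): reading one counter
 * through the name-to-id lookup above. Names beyond the basic set
 * ("rx_good_packets" etc.) are PMD-specific.
 */
#if 0
static int
example_read_one_xstat(uint16_t port_id, const char *name, uint64_t *value)
{
        uint64_t id;
        int ret;

        ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
        if (ret != 0)
                return ret;
        ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
        return ret == 1 ? 0 : (ret < 0 ? ret : -EINVAL);
}
#endif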
2626
2627/* retrieve basic stats names */
2628static int
2629eth_basic_stats_get_names(struct rte_eth_dev *dev,
2630        struct rte_eth_xstat_name *xstats_names)
2631{
2632        int cnt_used_entries = 0;
2633        uint32_t idx, id_queue;
2634        uint16_t num_q;
2635
2636        for (idx = 0; idx < RTE_NB_STATS; idx++) {
2637                strlcpy(xstats_names[cnt_used_entries].name,
2638                        eth_dev_stats_strings[idx].name,
2639                        sizeof(xstats_names[0].name));
2640                cnt_used_entries++;
2641        }
2642
2643        if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2644                return cnt_used_entries;
2645
2646        num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2647        for (id_queue = 0; id_queue < num_q; id_queue++) {
2648                for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2649                        snprintf(xstats_names[cnt_used_entries].name,
2650                                sizeof(xstats_names[0].name),
2651                                "rx_q%u_%s",
2652                                id_queue, eth_dev_rxq_stats_strings[idx].name);
2653                        cnt_used_entries++;
2654                }
2655
2656        }
2657        num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2658        for (id_queue = 0; id_queue < num_q; id_queue++) {
2659                for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2660                        snprintf(xstats_names[cnt_used_entries].name,
2661                                sizeof(xstats_names[0].name),
2662                                "tx_q%u_%s",
2663                                id_queue, eth_dev_txq_stats_strings[idx].name);
2664                        cnt_used_entries++;
2665                }
2666        }
2667        return cnt_used_entries;
2668}
2669
2670/* retrieve ethdev extended statistics names */
2671int
2672rte_eth_xstats_get_names_by_id(uint16_t port_id,
2673        struct rte_eth_xstat_name *xstats_names, unsigned int size,
2674        uint64_t *ids)
2675{
2676        struct rte_eth_xstat_name *xstats_names_copy;
2677        unsigned int no_basic_stat_requested = 1;
2678        unsigned int no_ext_stat_requested = 1;
2679        unsigned int expected_entries;
2680        unsigned int basic_count;
2681        struct rte_eth_dev *dev;
2682        unsigned int i;
2683        int ret;
2684
2685        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2686        dev = &rte_eth_devices[port_id];
2687
2688        basic_count = eth_dev_get_xstats_basic_count(dev);
2689        ret = eth_dev_get_xstats_count(port_id);
2690        if (ret < 0)
2691                return ret;
2692        expected_entries = (unsigned int)ret;
2693
2694        /* Return max number of stats if no ids given */
2695        if (!ids) {
2696                if (!xstats_names)
2697                        return expected_entries;
2698                else if (size < expected_entries)
2699                        return expected_entries;
2700        }
2701
2702        if (ids && !xstats_names)
2703                return -EINVAL;
2704
2705        if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2706                uint64_t ids_copy[size];
2707
2708                for (i = 0; i < size; i++) {
2709                        if (ids[i] < basic_count) {
2710                                no_basic_stat_requested = 0;
2711                                break;
2712                        }
2713
2714                        /*
2715                         * Convert ids to xstats ids that PMD knows.
2716                         * ids known by user are basic + extended stats.
2717                         */
2718                        ids_copy[i] = ids[i] - basic_count;
2719                }
2720
2721                if (no_basic_stat_requested)
2722                        return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2723                                        ids_copy, xstats_names, size);
2724        }
2725
2726        /* Retrieve all stats */
2727        if (!ids) {
2728                int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2729                                expected_entries);
2730                if (num_stats < 0 || num_stats > (int)expected_entries)
2731                        return num_stats;
2732                else
2733                        return expected_entries;
2734        }
2735
2736        xstats_names_copy = calloc(expected_entries,
2737                sizeof(struct rte_eth_xstat_name));
2738
2739        if (!xstats_names_copy) {
2740                RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2741                return -ENOMEM;
2742        }
2743
2744        if (ids) {
2745                for (i = 0; i < size; i++) {
2746                        if (ids[i] >= basic_count) {
2747                                no_ext_stat_requested = 0;
2748                                break;
2749                        }
2750                }
2751        }
2752
2753        /* Fill xstats_names_copy structure */
2754        if (ids && no_ext_stat_requested) {
2755                eth_basic_stats_get_names(dev, xstats_names_copy);
2756        } else {
2757                ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2758                        expected_entries);
2759                if (ret < 0) {
2760                        free(xstats_names_copy);
2761                        return ret;
2762                }
2763        }
2764
2765        /* Filter stats */
2766        for (i = 0; i < size; i++) {
2767                if (ids[i] >= expected_entries) {
2768                        RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2769                        free(xstats_names_copy);
2770                        return -1;
2771                }
2772                xstats_names[i] = xstats_names_copy[ids[i]];
2773        }
2774
2775        free(xstats_names_copy);
2776        return size;
2777}
2778
2779int
2780rte_eth_xstats_get_names(uint16_t port_id,
2781        struct rte_eth_xstat_name *xstats_names,
2782        unsigned int size)
2783{
2784        struct rte_eth_dev *dev;
2785        int cnt_used_entries;
2786        int cnt_expected_entries;
2787        int cnt_driver_entries;
2788
2789        cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2790        if (xstats_names == NULL || cnt_expected_entries < 0 ||
2791                        (int)size < cnt_expected_entries)
2792                return cnt_expected_entries;
2793
2794        /* port_id checked in eth_dev_get_xstats_count() */
2795        dev = &rte_eth_devices[port_id];
2796
2797        cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2798
2799        if (dev->dev_ops->xstats_get_names != NULL) {
2800                /* If there are any driver-specific xstats, append them
2801                 * to end of list.
2802                 */
2803                cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2804                        dev,
2805                        xstats_names + cnt_used_entries,
2806                        size - cnt_used_entries);
2807                if (cnt_driver_entries < 0)
2808                        return eth_err(port_id, cnt_driver_entries);
2809                cnt_used_entries += cnt_driver_entries;
2810        }
2811
2812        return cnt_used_entries;
2813}
2814
2816static int
2817eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2818{
2819        struct rte_eth_dev *dev;
2820        struct rte_eth_stats eth_stats;
2821        unsigned int count = 0, i, q;
2822        uint64_t val, *stats_ptr;
2823        uint16_t nb_rxqs, nb_txqs;
2824        int ret;
2825
2826        ret = rte_eth_stats_get(port_id, &eth_stats);
2827        if (ret < 0)
2828                return ret;
2829
2830        dev = &rte_eth_devices[port_id];
2831
2832        nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2833        nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2834
2835        /* global stats */
2836        for (i = 0; i < RTE_NB_STATS; i++) {
2837                stats_ptr = RTE_PTR_ADD(&eth_stats,
2838                                        eth_dev_stats_strings[i].offset);
2839                val = *stats_ptr;
2840                xstats[count++].value = val;
2841        }
2842
2843        if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2844                return count;
2845
2846        /* per-rxq stats */
2847        for (q = 0; q < nb_rxqs; q++) {
2848                for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2849                        stats_ptr = RTE_PTR_ADD(&eth_stats,
2850                                        eth_dev_rxq_stats_strings[i].offset +
2851                                        q * sizeof(uint64_t));
2852                        val = *stats_ptr;
2853                        xstats[count++].value = val;
2854                }
2855        }
2856
2857        /* per-txq stats */
2858        for (q = 0; q < nb_txqs; q++) {
2859                for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2860                        stats_ptr = RTE_PTR_ADD(&eth_stats,
2861                                        eth_dev_txq_stats_strings[i].offset +
2862                                        q * sizeof(uint64_t));
2863                        val = *stats_ptr;
2864                        xstats[count++].value = val;
2865                }
2866        }
2867        return count;
2868}
2869
2870/* retrieve ethdev extended statistics */
2871int
2872rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2873                         uint64_t *values, unsigned int size)
2874{
2875        unsigned int no_basic_stat_requested = 1;
2876        unsigned int no_ext_stat_requested = 1;
2877        unsigned int num_xstats_filled;
2878        unsigned int basic_count;
2879        uint16_t expected_entries;
2880        struct rte_eth_dev *dev;
2881        unsigned int i;
2882        int ret;
2883
2884        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2885        dev = &rte_eth_devices[port_id];
2886
2887        ret = eth_dev_get_xstats_count(port_id);
2888        if (ret < 0)
2889                return ret;
2890        expected_entries = (uint16_t)ret;
2891        struct rte_eth_xstat xstats[expected_entries];
2892        basic_count = eth_dev_get_xstats_basic_count(dev);
2893
2894        /* Return max number of stats if no ids given */
2895        if (!ids) {
2896                if (!values)
2897                        return expected_entries;
2898                else if (size < expected_entries)
2899                        return expected_entries;
2900        }
2901
2902        if (ids && !values)
2903                return -EINVAL;
2904
2905        if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2907                uint64_t ids_copy[size];
2908
2909                for (i = 0; i < size; i++) {
2910                        if (ids[i] < basic_count) {
2911                                no_basic_stat_requested = 0;
2912                                break;
2913                        }
2914
2915                        /*
2916                         * Convert ids to xstats ids that PMD knows.
2917                         * ids known by user are basic + extended stats.
2918                         */
2919                        ids_copy[i] = ids[i] - basic_count;
2920                }
2921
2922                if (no_basic_stat_requested)
2923                        return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2924                                        values, size);
2925        }
2926
2927        if (ids) {
2928                for (i = 0; i < size; i++) {
2929                        if (ids[i] >= basic_count) {
2930                                no_ext_stat_requested = 0;
2931                                break;
2932                        }
2933                }
2934        }
2935
2936        /* Fill the xstats structure */
2937        if (ids && no_ext_stat_requested)
2938                ret = eth_basic_stats_get(port_id, xstats);
2939        else
2940                ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2941
2942        if (ret < 0)
2943                return ret;
2944        num_xstats_filled = (unsigned int)ret;
2945
2946        /* Return all stats */
2947        if (!ids) {
2948                for (i = 0; i < num_xstats_filled; i++)
2949                        values[i] = xstats[i].value;
2950                return expected_entries;
2951        }
2952
2953        /* Filter stats */
2954        for (i = 0; i < size; i++) {
2955                if (ids[i] >= expected_entries) {
2956                        RTE_ETHDEV_LOG(ERR, "Id value %" PRIu64 " is out of range\n", ids[i]);
2957                        return -1;
2958                }
2959                values[i] = xstats[ids[i]].value;
2960        }
2961        return size;
2962}
2963
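/*
 * Illustrative usage sketch (not part of this file; port_id and the
 * variable names are hypothetical): rte_eth_xstats_get_by_id() is
 * typically driven in two steps, sizing with ids == NULL first, then
 * fetching selected ids:
 *
 *	uint64_t ids[] = { 0, 5 };
 *	uint64_t values[RTE_DIM(ids)];
 *	int total = rte_eth_xstats_get_by_id(port_id, NULL, NULL, 0);
 *	if (total > 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, ids, values,
 *				     RTE_DIM(ids)) == (int)RTE_DIM(ids))
 *		printf("xstat id 0 = %" PRIu64 "\n", values[0]);
 */
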
2964int
2965rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2966        unsigned int n)
2967{
2968        struct rte_eth_dev *dev;
2969        unsigned int count, i;
2970        signed int xcount = 0;
2971        int ret;
2972
2973        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2974        if (xstats == NULL && n > 0)
2975                return -EINVAL;
2976        dev = &rte_eth_devices[port_id];
2977
2978        count = eth_dev_get_xstats_basic_count(dev);
2979
2980        /* implemented by the driver */
2981        if (dev->dev_ops->xstats_get != NULL) {
2982                /* Retrieve the xstats from the driver at the end of the
2983                 * xstats struct.
2984                 */
2985                xcount = (*dev->dev_ops->xstats_get)(dev,
2986                                     (n > count) ? xstats + count : NULL,
2987                                     (n > count) ? n - count : 0);
2988
2989                if (xcount < 0)
2990                        return eth_err(port_id, xcount);
2991        }
2992
2993        if (n < count + xcount || xstats == NULL)
2994                return count + xcount;
2995
2996        /* now fill the xstats structure */
2997        ret = eth_basic_stats_get(port_id, xstats);
2998        if (ret < 0)
2999                return ret;
3000        count = ret;
3001
3002        for (i = 0; i < count; i++)
3003                xstats[i].id = i;
3004        /* add an offset to driver-specific stats */
3005        for ( ; i < count + xcount; i++)
3006                xstats[i].id += count;
3007
3008        return count + xcount;
3009}
3010
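/*
 * Illustrative sketch (hypothetical application code): calling
 * rte_eth_xstats_get() with n == 0 returns the required array size, so
 * the usual pattern pairs it with rte_eth_xstats_get_names():
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	if (n > 0) {
 *		struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *		struct rte_eth_xstat_name *nm = malloc(n * sizeof(*nm));
 *		if (xs != NULL && nm != NULL &&
 *		    rte_eth_xstats_get(port_id, xs, n) == n &&
 *		    rte_eth_xstats_get_names(port_id, nm, n) == n)
 *			for (int i = 0; i < n; i++)
 *				printf("%s: %" PRIu64 "\n",
 *				       nm[xs[i].id].name, xs[i].value);
 *	}
 */
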
3011/* reset ethdev extended statistics */
3012int
3013rte_eth_xstats_reset(uint16_t port_id)
3014{
3015        struct rte_eth_dev *dev;
3016
3017        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3018        dev = &rte_eth_devices[port_id];
3019
3020        /* implemented by the driver */
3021        if (dev->dev_ops->xstats_reset != NULL)
3022                return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3023
3024        /* fallback to default */
3025        return rte_eth_stats_reset(port_id);
3026}
3027
3028static int
3029eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3030                uint8_t stat_idx, uint8_t is_rx)
3031{
3032        struct rte_eth_dev *dev;
3033
3034        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3035        dev = &rte_eth_devices[port_id];
3036
3037        if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3038                return -EINVAL;
3039
3040        if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3041                return -EINVAL;
3042
3043        if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3044                return -EINVAL;
3045
3046        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3047        return (*dev->dev_ops->queue_stats_mapping_set)(dev, queue_id, stat_idx, is_rx);
3048}
3049
3050int
3051rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3052                uint8_t stat_idx)
3053{
3054        return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3055                                                tx_queue_id,
3056                                                stat_idx, STAT_QMAP_TX));
3057}
3058
3059int
3060rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3061                uint8_t stat_idx)
3062{
3063        return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3064                                                rx_queue_id,
3065                                                stat_idx, STAT_QMAP_RX));
3066}
3067
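/*
 * Illustrative sketch: a NIC exposes at most RTE_ETHDEV_QUEUE_STAT_CNTRS
 * per-queue counters, so an application may map its first Rx queues 1:1
 * before reading the q_ipackets[]/q_ibytes[] arrays of rte_eth_stats:
 *
 *	for (uint16_t q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
 *		if (rte_eth_dev_set_rx_queue_stats_mapping(port_id, q,
 *							   (uint8_t)q) != 0)
 *			break;	// queue absent or op unsupported
 */
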
3068int
3069rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3070{
3071        struct rte_eth_dev *dev;
3072
3073        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3074        dev = &rte_eth_devices[port_id];
3075
3076        if (fw_version == NULL && fw_size > 0) {
3077                RTE_ETHDEV_LOG(ERR,
3078                        "Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3079                        port_id);
3080                return -EINVAL;
3081        }
3082
3083        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3084        return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3085                                                        fw_version, fw_size));
3086}
3087
3088int
3089rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3090{
3091        struct rte_eth_dev *dev;
3092        const struct rte_eth_desc_lim lim = {
3093                .nb_max = UINT16_MAX,
3094                .nb_min = 0,
3095                .nb_align = 1,
3096                .nb_seg_max = UINT16_MAX,
3097                .nb_mtu_seg_max = UINT16_MAX,
3098        };
3099        int diag;
3100
3101        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3102        dev = &rte_eth_devices[port_id];
3103
3104        if (dev_info == NULL) {
3105                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3106                        port_id);
3107                return -EINVAL;
3108        }
3109
3110        /*
3111         * Init dev_info before querying the driver so that callers which
3112         * ignore the return status still see a well-defined structure.
3113         */
3114        memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3115        dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3116
3117        dev_info->rx_desc_lim = lim;
3118        dev_info->tx_desc_lim = lim;
3119        dev_info->device = dev->device;
3120        dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3121                RTE_ETHER_CRC_LEN;
3122        dev_info->max_mtu = UINT16_MAX;
3123
3124        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3125        diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3126        if (diag != 0) {
3127                /* Cleanup already filled in device information */
3128                /* Clean up any partially filled device information */
3129                return eth_err(port_id, diag);
3130        }
3131
3132        /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3133        dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3134                        RTE_MAX_QUEUES_PER_PORT);
3135        dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3136                        RTE_MAX_QUEUES_PER_PORT);
3137
3138        dev_info->driver_name = dev->device->driver->name;
3139        dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3140        dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3141
3142        dev_info->dev_flags = &dev->data->dev_flags;
3143
3144        return 0;
3145}
3146
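/*
 * Illustrative sketch (wanted_rxq/wanted_txq are hypothetical): callers
 * commonly clamp their requested queue counts against the reported
 * capabilities before configuring the device:
 *
 *	struct rte_eth_conf conf = { 0 };
 *	struct rte_eth_dev_info info;
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		uint16_t nb_rxq = RTE_MIN(wanted_rxq, info.max_rx_queues);
 *		uint16_t nb_txq = RTE_MIN(wanted_txq, info.max_tx_queues);
 *		ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	}
 */
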
3147int
3148rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3149{
3150        struct rte_eth_dev *dev;
3151
3152        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3153        dev = &rte_eth_devices[port_id];
3154
3155        if (dev_conf == NULL) {
3156                RTE_ETHDEV_LOG(ERR,
3157                        "Cannot get ethdev port %u configuration to NULL\n",
3158                        port_id);
3159                return -EINVAL;
3160        }
3161
3162        memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3163
3164        return 0;
3165}
3166
3167int
3168rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3169                                 uint32_t *ptypes, int num)
3170{
3171        int i, j;
3172        struct rte_eth_dev *dev;
3173        const uint32_t *all_ptypes;
3174
3175        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3176        dev = &rte_eth_devices[port_id];
3177
3178        if (ptypes == NULL && num > 0) {
3179                RTE_ETHDEV_LOG(ERR,
3180                        "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3181                        port_id);
3182                return -EINVAL;
3183        }
3184
3185        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3186        all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3187
3188        if (!all_ptypes)
3189                return 0;
3190
3191        for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3192                if (all_ptypes[i] & ptype_mask) {
3193                        if (j < num)
3194                                ptypes[j] = all_ptypes[i];
3195                        j++;
3196                }
3197
3198        return j;
3199}
3200
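/*
 * Illustrative sketch: the return value is the total number of matching
 * ptypes even when the supplied array is too small, enabling the usual
 * size-then-fetch pattern (names below are hypothetical):
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *						 RTE_PTYPE_L4_MASK, NULL, 0);
 *	if (n > 0) {
 *		uint32_t *ptypes = malloc(n * sizeof(*ptypes));
 *		if (ptypes != NULL)
 *			n = rte_eth_dev_get_supported_ptypes(port_id,
 *					RTE_PTYPE_L4_MASK, ptypes, n);
 *	}
 */
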
3201int
3202rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3203                                 uint32_t *set_ptypes, unsigned int num)
3204{
3205        const uint32_t valid_ptype_masks[] = {
3206                RTE_PTYPE_L2_MASK,
3207                RTE_PTYPE_L3_MASK,
3208                RTE_PTYPE_L4_MASK,
3209                RTE_PTYPE_TUNNEL_MASK,
3210                RTE_PTYPE_INNER_L2_MASK,
3211                RTE_PTYPE_INNER_L3_MASK,
3212                RTE_PTYPE_INNER_L4_MASK,
3213        };
3214        const uint32_t *all_ptypes;
3215        struct rte_eth_dev *dev;
3216        uint32_t unused_mask;
3217        unsigned int i, j;
3218        int ret;
3219
3220        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3221        dev = &rte_eth_devices[port_id];
3222
3223        if (num > 0 && set_ptypes == NULL) {
3224                RTE_ETHDEV_LOG(ERR,
3225                        "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3226                        port_id);
3227                return -EINVAL;
3228        }
3229
3230        if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3231                        *dev->dev_ops->dev_ptypes_set == NULL) {
3232                ret = 0;
3233                goto ptype_unknown;
3234        }
3235
3236        if (ptype_mask == 0) {
3237                ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3238                                ptype_mask);
3239                goto ptype_unknown;
3240        }
3241
3242        unused_mask = ptype_mask;
3243        for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3244                uint32_t mask = ptype_mask & valid_ptype_masks[i];
3245                if (mask && mask != valid_ptype_masks[i]) {
3246                        ret = -EINVAL;
3247                        goto ptype_unknown;
3248                }
3249                unused_mask &= ~valid_ptype_masks[i];
3250        }
3251
3252        if (unused_mask) {
3253                ret = -EINVAL;
3254                goto ptype_unknown;
3255        }
3256
3257        all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3258        if (all_ptypes == NULL) {
3259                ret = 0;
3260                goto ptype_unknown;
3261        }
3262
3263        /*
3264         * Accommodate as many set_ptypes as possible. If the supplied
3265         * set_ptypes array is too small, fill it partially.
3266         */
3267        for (i = 0, j = 0; set_ptypes != NULL &&
3268                                (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3269                if (ptype_mask & all_ptypes[i]) {
3270                        if (j < num - 1) {
3271                                set_ptypes[j] = all_ptypes[i];
3272                                j++;
3273                                continue;
3274                        }
3275                        break;
3276                }
3277        }
3278
3279        if (set_ptypes != NULL && j < num)
3280                set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3281
3282        return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3283
3284ptype_unknown:
3285        if (num > 0)
3286                set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3287
3288        return ret;
3289}
3290
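/*
 * Illustrative sketch: an application that only inspects L3/L4 headers
 * can ask the PMD to stop classifying everything else (a ptype_mask of 0
 * disables ptype reporting altogether):
 *
 *	uint32_t kept[8];
 *	int ret = rte_eth_dev_set_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK,
 *			kept, RTE_DIM(kept));
 *	// on success kept[] lists the ptypes still reported, terminated
 *	// by RTE_PTYPE_UNKNOWN when there is room for the sentinel
 */
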
3291int
3292rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3293        unsigned int num)
3294{
3295        int32_t ret;
3296        struct rte_eth_dev *dev;
3297        struct rte_eth_dev_info dev_info;
3298
3299        if (ma == NULL) {
3300                RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
3301                return -EINVAL;
3302        }
3303
3304        /* rte_eth_dev_info_get() validates port_id for us */
3305        ret = rte_eth_dev_info_get(port_id, &dev_info);
3306        if (ret != 0)
3307                return ret;
3308
3309        dev = &rte_eth_devices[port_id];
3310        num = RTE_MIN(dev_info.max_mac_addrs, num);
3311        memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
3312
3313        return num;
3314}
3315
3316int
3317rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3318{
3319        struct rte_eth_dev *dev;
3320
3321        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3322        dev = &rte_eth_devices[port_id];
3323
3324        if (mac_addr == NULL) {
3325                RTE_ETHDEV_LOG(ERR,
3326                        "Cannot get ethdev port %u MAC address to NULL\n",
3327                        port_id);
3328                return -EINVAL;
3329        }
3330
3331        rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3332
3333        return 0;
3334}
3335
3336int
3337rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3338{
3339        struct rte_eth_dev *dev;
3340
3341        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3342        dev = &rte_eth_devices[port_id];
3343
3344        if (mtu == NULL) {
3345                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3346                        port_id);
3347                return -EINVAL;
3348        }
3349
3350        *mtu = dev->data->mtu;
3351        return 0;
3352}
3353
3354int
3355rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3356{
3357        int ret;
3358        struct rte_eth_dev_info dev_info;
3359        struct rte_eth_dev *dev;
3360
3361        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3362        dev = &rte_eth_devices[port_id];
3363        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3364
3365        /*
3366         * Check if the device supports dev_infos_get; if it does not,
3367         * skip the min_mtu/max_mtu validation here, since it needs
3368         * values populated by rte_eth_dev_info_get(), which in turn
3369         * relies on dev->dev_ops->dev_infos_get.
3370         */
3371        if (*dev->dev_ops->dev_infos_get != NULL) {
3372                ret = rte_eth_dev_info_get(port_id, &dev_info);
3373                if (ret != 0)
3374                        return ret;
3375
3376                ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
3377                if (ret != 0)
3378                        return ret;
3379        }
3380
3381        if (dev->data->dev_configured == 0) {
3382                RTE_ETHDEV_LOG(ERR,
3383                        "Port %u must be configured before MTU set\n",
3384                        port_id);
3385                return -EINVAL;
3386        }
3387
3388        ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3389        if (ret == 0)
3390                dev->data->mtu = mtu;
3391
3392        return eth_err(port_id, ret);
3393}
3394
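/*
 * Illustrative sketch (wanted_mtu is hypothetical): since the new MTU is
 * validated against the driver-reported range, a defensive caller clamps
 * it first:
 *
 *	struct rte_eth_dev_info info;
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		uint16_t mtu = RTE_MAX(RTE_MIN(wanted_mtu, info.max_mtu),
 *				       info.min_mtu);
 *		ret = rte_eth_dev_set_mtu(port_id, mtu);
 *	}
 */
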
3395int
3396rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3397{
3398        struct rte_eth_dev *dev;
3399        int ret;
3400
3401        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3402        dev = &rte_eth_devices[port_id];
3403
3404        if (!(dev->data->dev_conf.rxmode.offloads &
3405              RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
3406                RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
3407                        port_id);
3408                return -ENOSYS;
3409        }
3410
3411        if (vlan_id > 4095) {
3412                RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3413                        port_id, vlan_id);
3414                return -EINVAL;
3415        }
3416        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3417
3418        ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3419        if (ret == 0) {
3420                struct rte_vlan_filter_conf *vfc;
3421                int vidx;
3422                int vbit;
3423
3424                vfc = &dev->data->vlan_filter_conf;
3425                vidx = vlan_id / 64;
3426                vbit = vlan_id % 64;
3427
3428                if (on)
3429                        vfc->ids[vidx] |= RTE_BIT64(vbit);
3430                else
3431                        vfc->ids[vidx] &= ~RTE_BIT64(vbit);
3432        }
3433
3434        return eth_err(port_id, ret);
3435}
3436
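/*
 * Illustrative sketch: RTE_ETH_RX_OFFLOAD_VLAN_FILTER must already be in
 * rxmode.offloads (or turned on via rte_eth_dev_set_vlan_offload())
 * before individual VLAN IDs can be admitted:
 *
 *	ret = rte_eth_dev_vlan_filter(port_id, 100, 1);	// accept VLAN 100
 *	ret = rte_eth_dev_vlan_filter(port_id, 100, 0);	// drop it again
 */
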
3437int
3438rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3439                                    int on)
3440{
3441        struct rte_eth_dev *dev;
3442
3443        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3444        dev = &rte_eth_devices[port_id];
3445
3446        if (rx_queue_id >= dev->data->nb_rx_queues) {
3447                RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3448                return -EINVAL;
3449        }
3450
3451        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3452        (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3453
3454        return 0;
3455}
3456
3457int
3458rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3459                                enum rte_vlan_type vlan_type,
3460                                uint16_t tpid)
3461{
3462        struct rte_eth_dev *dev;
3463
3464        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3465        dev = &rte_eth_devices[port_id];
3466
3467        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3468        return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3469                                                               tpid));
3470}
3471
3472int
3473rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3474{
3475        struct rte_eth_dev_info dev_info;
3476        struct rte_eth_dev *dev;
3477        int ret = 0;
3478        int mask = 0;
3479        int cur, org = 0;
3480        uint64_t orig_offloads;
3481        uint64_t dev_offloads;
3482        uint64_t new_offloads;
3483
3484        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3485        dev = &rte_eth_devices[port_id];
3486
3487        /* save original values in case of failure */
3488        orig_offloads = dev->data->dev_conf.rxmode.offloads;
3489        dev_offloads = orig_offloads;
3490
3491        /* check which option changed by application */
3492        cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
3493        org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
3494        if (cur != org) {
3495                if (cur)
3496                        dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3497                else
3498                        dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3499                mask |= RTE_ETH_VLAN_STRIP_MASK;
3500        }
3501
3502        cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
3503        org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
3504        if (cur != org) {
3505                if (cur)
3506                        dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3507                else
3508                        dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3509                mask |= RTE_ETH_VLAN_FILTER_MASK;
3510        }
3511
3512        cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
3513        org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
3514        if (cur != org) {
3515                if (cur)
3516                        dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3517                else
3518                        dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3519                mask |= RTE_ETH_VLAN_EXTEND_MASK;
3520        }
3521
3522        cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
3523        org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
3524        if (cur != org) {
3525                if (cur)
3526                        dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3527                else
3528                        dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3529                mask |= RTE_ETH_QINQ_STRIP_MASK;
3530        }
3531
3532        /* no change */
3533        if (mask == 0)
3534                return ret;
3535
3536        ret = rte_eth_dev_info_get(port_id, &dev_info);
3537        if (ret != 0)
3538                return ret;
3539
3540        /* Rx VLAN offloading must be within the device capabilities */
3541        if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3542                new_offloads = dev_offloads & ~orig_offloads;
3543                RTE_ETHDEV_LOG(ERR,
3544                        "Ethdev port_id=%u requested newly added VLAN offloads "
3545                        "0x%" PRIx64 " must be within Rx offloads capabilities "
3546                        "0x%" PRIx64 " in %s()\n",
3547                        port_id, new_offloads, dev_info.rx_offload_capa,
3548                        __func__);
3549                return -EINVAL;
3550        }
3551
3552        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3553        dev->data->dev_conf.rxmode.offloads = dev_offloads;
3554        ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3555        if (ret) {
3556                /* hit an error, restore original values */
3557                dev->data->dev_conf.rxmode.offloads = orig_offloads;
3558        }
3559
3560        return eth_err(port_id, ret);
3561}
3562
3563int
3564rte_eth_dev_get_vlan_offload(uint16_t port_id)
3565{
3566        struct rte_eth_dev *dev;
3567        uint64_t *dev_offloads;
3568        int ret = 0;
3569
3570        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3571        dev = &rte_eth_devices[port_id];
3572        dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3573
3574        if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
3575                ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
3576
3577        if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
3578                ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
3579
3580        if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
3581                ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
3582
3583        if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
3584                ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
3585
3586        return ret;
3587}
3588
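/*
 * Illustrative sketch: the get/set pair above is meant for
 * read-modify-write of the per-port VLAN offload flags; only the bits
 * that actually changed are pushed down to the driver:
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *	if (mask >= 0) {
 *		mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 *		ret = rte_eth_dev_set_vlan_offload(port_id, mask);
 *	}
 */
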
3589int
3590rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3591{
3592        struct rte_eth_dev *dev;
3593
3594        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3595        dev = &rte_eth_devices[port_id];
3596
3597        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3598        return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3599}
3600
3601int
3602rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3603{
3604        struct rte_eth_dev *dev;
3605
3606        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3607        dev = &rte_eth_devices[port_id];
3608
3609        if (fc_conf == NULL) {
3610                RTE_ETHDEV_LOG(ERR,
3611                        "Cannot get ethdev port %u flow control config to NULL\n",
3612                        port_id);
3613                return -EINVAL;
3614        }
3615
3616        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3617        memset(fc_conf, 0, sizeof(*fc_conf));
3618        return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3619}
3620
3621int
3622rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3623{
3624        struct rte_eth_dev *dev;
3625
3626        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3627        dev = &rte_eth_devices[port_id];
3628
3629        if (fc_conf == NULL) {
3630                RTE_ETHDEV_LOG(ERR,
3631                        "Cannot set ethdev port %u flow control from NULL config\n",
3632                        port_id);
3633                return -EINVAL;
3634        }
3635
3636        if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3637                RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3638                return -EINVAL;
3639        }
3640
3641        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3642        return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3643}
3644
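/*
 * Illustrative sketch: rte_eth_fc_conf carries watermarks and pause time
 * alongside the mode, so flow control is normally updated
 * read-modify-write:
 *
 *	struct rte_eth_fc_conf fc;
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *		fc.mode = RTE_ETH_FC_FULL;
 *		ret = rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *	}
 */
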
3645int
3646rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3647                                   struct rte_eth_pfc_conf *pfc_conf)
3648{
3649        struct rte_eth_dev *dev;
3650
3651        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3652        dev = &rte_eth_devices[port_id];
3653
3654        if (pfc_conf == NULL) {
3655                RTE_ETHDEV_LOG(ERR,
3656                        "Cannot set ethdev port %u priority flow control from NULL config\n",
3657                        port_id);
3658                return -EINVAL;
3659        }
3660
3661        if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3662                RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3663                return -EINVAL;
3664        }
3665
3666        /* High water, low water validation are device specific */
3667        if (*dev->dev_ops->priority_flow_ctrl_set)
3668                return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3669                                        (dev, pfc_conf));
3670        return -ENOTSUP;
3671}
3672
3673static int
3674validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3675                struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3676{
3677        if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
3678                        (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3679                if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
3680                        RTE_ETHDEV_LOG(ERR,
3681                                "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
3682                                pfc_queue_conf->rx_pause.tx_qid,
3683                                dev_info->nb_tx_queues);
3684                        return -EINVAL;
3685                }
3686
3687                if (pfc_queue_conf->rx_pause.tc >= tc_max) {
3688                        RTE_ETHDEV_LOG(ERR,
3689                                "PFC TC not in range for Rx pause requested:%d max:%d\n",
3690                                pfc_queue_conf->rx_pause.tc, tc_max);
3691                        return -EINVAL;
3692                }
3693        }
3694
3695        return 0;
3696}
3697
3698static int
3699validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3700                struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3701{
3702        if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
3703                        (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3704                if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
3705                        RTE_ETHDEV_LOG(ERR,
3706                                "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
3707                                pfc_queue_conf->tx_pause.rx_qid,
3708                                dev_info->nb_rx_queues);
3709                        return -EINVAL;
3710                }
3711
3712                if (pfc_queue_conf->tx_pause.tc >= tc_max) {
3713                        RTE_ETHDEV_LOG(ERR,
3714                                "PFC TC not in range for Tx pause requested:%d max:%d\n",
3715                                pfc_queue_conf->tx_pause.tc, tc_max);
3716                        return -EINVAL;
3717                }
3718        }
3719
3720        return 0;
3721}
3722
3723int
3724rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
3725                struct rte_eth_pfc_queue_info *pfc_queue_info)
3726{
3727        struct rte_eth_dev *dev;
3728
3729        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3730        dev = &rte_eth_devices[port_id];
3731
3732        if (pfc_queue_info == NULL) {
3733                RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
3734                        port_id);
3735                return -EINVAL;
3736        }
3737
3738        if (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3739                return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3740                        (dev, pfc_queue_info));
3741        return -ENOTSUP;
3742}
3743
3744int
3745rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
3746                struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3747{
3748        struct rte_eth_pfc_queue_info pfc_info;
3749        struct rte_eth_dev_info dev_info;
3750        struct rte_eth_dev *dev;
3751        int ret;
3752
3753        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3754        dev = &rte_eth_devices[port_id];
3755
3756        if (pfc_queue_conf == NULL) {
3757                RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
3758                        port_id);
3759                return -EINVAL;
3760        }
3761
3762        ret = rte_eth_dev_info_get(port_id, &dev_info);
3763        if (ret != 0)
3764                return ret;
3765
3766        ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
3767        if (ret != 0)
3768                return ret;
3769
3770        if (pfc_info.tc_max == 0) {
3771                RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
3772                        port_id);
3773                return -ENOTSUP;
3774        }
3775
3776        /* Check whether the requested mode is supported */
3777        if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
3778                        pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
3779                RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
3780                        port_id);
3781                return -EINVAL;
3782        }
3783
3784        if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
3785                        pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
3786                RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
3787                        port_id);
3788                return -EINVAL;
3789        }
3790
3791        /* Validate Rx pause parameters */
3792        if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3793                        pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
3794                ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
3795                                pfc_queue_conf);
3796                if (ret != 0)
3797                        return ret;
3798        }
3799
3800        /* Validate Tx pause parameters */
3801        if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3802                        pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
3803                ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
3804                                pfc_queue_conf);
3805                if (ret != 0)
3806                        return ret;
3807        }
3808
3809        if (*dev->dev_ops->priority_flow_ctrl_queue_config)
3810                return eth_err(port_id,
3811                               (*dev->dev_ops->priority_flow_ctrl_queue_config)(
3812                                dev, pfc_queue_conf));
3813        return -ENOTSUP;
3814}
3815
3816static int
3817eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3818                        uint16_t reta_size)
3819{
3820        uint16_t i, num;
3821
3822        num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
3823        for (i = 0; i < num; i++) {
3824                if (reta_conf[i].mask)
3825                        return 0;
3826        }
3827
3828        return -EINVAL;
3829}
3830
3831static int
3832eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3833                         uint16_t reta_size,
3834                         uint16_t max_rxq)
3835{
3836        uint16_t i, idx, shift;
3837
3838        if (max_rxq == 0) {
3839                RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3840                return -EINVAL;
3841        }
3842
3843        for (i = 0; i < reta_size; i++) {
3844                idx = i / RTE_ETH_RETA_GROUP_SIZE;
3845                shift = i % RTE_ETH_RETA_GROUP_SIZE;
3846                if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
3847                        (reta_conf[idx].reta[shift] >= max_rxq)) {
3848                        RTE_ETHDEV_LOG(ERR,
3849                                "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3850                                idx, shift,
3851                                reta_conf[idx].reta[shift], max_rxq);
3852                        return -EINVAL;
3853                }
3854        }
3855
3856        return 0;
3857}
3858
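/*
 * Worked example of the RETA group encoding validated above: entry i is
 * reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE]
 * and is only considered when the matching bit of that group's mask is
 * set. With the usual group size of 64, entry 130 is reta_conf[2].reta[2],
 * gated by bit 2 of reta_conf[2].mask.
 */
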
3859int
3860rte_eth_dev_rss_reta_update(uint16_t port_id,
3861                            struct rte_eth_rss_reta_entry64 *reta_conf,
3862                            uint16_t reta_size)
3863{
3864        enum rte_eth_rx_mq_mode mq_mode;
3865        struct rte_eth_dev *dev;
3866        int ret;
3867
3868        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3869        dev = &rte_eth_devices[port_id];
3870
3871        if (reta_conf == NULL) {
3872                RTE_ETHDEV_LOG(ERR,
3873                        "Cannot update ethdev port %u RSS RETA to NULL\n",
3874                        port_id);
3875                return -EINVAL;
3876        }
3877
3878        if (reta_size == 0) {
3879                RTE_ETHDEV_LOG(ERR,
3880                        "Cannot update ethdev port %u RSS RETA with zero size\n",
3881                        port_id);
3882                return -EINVAL;
3883        }
3884
3885        /* Check mask bits */
3886        ret = eth_check_reta_mask(reta_conf, reta_size);
3887        if (ret < 0)
3888                return ret;
3889
3890        /* Check entry value */
3891        ret = eth_check_reta_entry(reta_conf, reta_size,
3892                                dev->data->nb_rx_queues);
3893        if (ret < 0)
3894                return ret;
3895
3896        mq_mode = dev->data->dev_conf.rxmode.mq_mode;
3897        if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
3898                RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
3899                return -ENOTSUP;
3900        }
3901
3902        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3903        return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3904                                                             reta_size));
3905}
3906
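/*
 * Illustrative sketch (nb_rxq is hypothetical; reta_size normally comes
 * from dev_info.reta_size and is assumed to be a multiple of the group
 * size): spreading the table round-robin over the configured Rx queues:
 *
 *	struct rte_eth_rss_reta_entry64 conf[reta_size /
 *					     RTE_ETH_RETA_GROUP_SIZE];
 *	memset(conf, 0, sizeof(conf));
 *	for (uint16_t i = 0; i < reta_size; i++) {
 *		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
 *		uint16_t sft = i % RTE_ETH_RETA_GROUP_SIZE;
 *		conf[idx].mask |= RTE_BIT64(sft);
 *		conf[idx].reta[sft] = i % nb_rxq;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
 */
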
3907int
3908rte_eth_dev_rss_reta_query(uint16_t port_id,
3909                           struct rte_eth_rss_reta_entry64 *reta_conf,
3910                           uint16_t reta_size)
3911{
3912        struct rte_eth_dev *dev;
3913        int ret;
3914
3915        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3916        dev = &rte_eth_devices[port_id];
3917
3918        if (reta_conf == NULL) {
3919                RTE_ETHDEV_LOG(ERR,
3920                        "Cannot query ethdev port %u RSS RETA from NULL config\n",
3921                        port_id);
3922                return -EINVAL;
3923        }
3924
3925        /* Check mask bits */
3926        ret = eth_check_reta_mask(reta_conf, reta_size);
3927        if (ret < 0)
3928                return ret;
3929
3930        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3931        return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3932                                                            reta_size));
3933}
3934
3935int
3936rte_eth_dev_rss_hash_update(uint16_t port_id,
3937                            struct rte_eth_rss_conf *rss_conf)
3938{
3939        struct rte_eth_dev *dev;
3940        struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3941        enum rte_eth_rx_mq_mode mq_mode;
3942        int ret;
3943
3944        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3945        dev = &rte_eth_devices[port_id];
3946
3947        if (rss_conf == NULL) {
3948                RTE_ETHDEV_LOG(ERR,
3949                        "Cannot update ethdev port %u RSS hash from NULL config\n",
3950                        port_id);
3951                return -EINVAL;
3952        }
3953
3954        ret = rte_eth_dev_info_get(port_id, &dev_info);
3955        if (ret != 0)
3956                return ret;
3957
3958        rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3959        if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3960            dev_info.flow_type_rss_offloads) {
3961                RTE_ETHDEV_LOG(ERR,
3962                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3963                        port_id, rss_conf->rss_hf,
3964                        dev_info.flow_type_rss_offloads);
3965                return -EINVAL;
3966        }
3967
3968        mq_mode = dev->data->dev_conf.rxmode.mq_mode;
3969        if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
3970                RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
3971                return -ENOTSUP;
3972        }
3973
3974        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3975        return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3976                                                                 rss_conf));
3977}
3978
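/*
 * Illustrative sketch: intersecting the desired hash functions with the
 * device capabilities avoids the -EINVAL path above (rss_key == NULL
 * keeps the current key):
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_rss_conf rss = { .rss_key = NULL };
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		rss.rss_hf = RTE_ETH_RSS_IP & info.flow_type_rss_offloads;
 *		ret = rte_eth_dev_rss_hash_update(port_id, &rss);
 *	}
 */
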
3979int
3980rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3981                              struct rte_eth_rss_conf *rss_conf)
3982{
3983        struct rte_eth_dev *dev;
3984
3985        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3986        dev = &rte_eth_devices[port_id];
3987
3988        if (rss_conf == NULL) {
3989                RTE_ETHDEV_LOG(ERR,
3990                        "Cannot get ethdev port %u RSS hash config to NULL\n",
3991                        port_id);
3992                return -EINVAL;
3993        }
3994
3995        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3996        return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3997                                                                   rss_conf));
3998}
3999
4000int
4001rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4002                                struct rte_eth_udp_tunnel *udp_tunnel)
4003{
4004        struct rte_eth_dev *dev;
4005
4006        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4007        dev = &rte_eth_devices[port_id];
4008
4009        if (udp_tunnel == NULL) {
4010                RTE_ETHDEV_LOG(ERR,
4011                        "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4012                        port_id);
4013                return -EINVAL;
4014        }
4015
4016        if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4017                RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4018                return -EINVAL;
4019        }
4020
4021        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
4022        return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4023                                                                udp_tunnel));
4024}
4025
4026int
4027rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4028                                   struct rte_eth_udp_tunnel *udp_tunnel)
4029{
4030        struct rte_eth_dev *dev;
4031
4032        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4033        dev = &rte_eth_devices[port_id];
4034
4035        if (udp_tunnel == NULL) {
4036                RTE_ETHDEV_LOG(ERR,
4037                        "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4038                        port_id);
4039                return -EINVAL;
4040        }
4041
4042        if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4043                RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4044                return -EINVAL;
4045        }
4046
4047        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
4048        return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4049                                                                udp_tunnel));
4050}
4051
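/*
 * Illustrative sketch: registering the IANA-assigned VXLAN port so the
 * NIC recognizes the tunnel encapsulation:
 *
 *	struct rte_eth_udp_tunnel tun = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tun);
 */
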
4052int
4053rte_eth_led_on(uint16_t port_id)
4054{
4055        struct rte_eth_dev *dev;
4056
4057        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4058        dev = &rte_eth_devices[port_id];
4059
4060        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
4061        return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4062}
4063
4064int
4065rte_eth_led_off(uint16_t port_id)
4066{
4067        struct rte_eth_dev *dev;
4068
4069        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4070        dev = &rte_eth_devices[port_id];
4071
4072        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
4073        return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4074}
4075
4076int
4077rte_eth_fec_get_capability(uint16_t port_id,
4078                           struct rte_eth_fec_capa *speed_fec_capa,
4079                           unsigned int num)
4080{
4081        struct rte_eth_dev *dev;
4082        int ret;
4083
4084        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4085        dev = &rte_eth_devices[port_id];
4086
4087        if (speed_fec_capa == NULL && num > 0) {
4088                RTE_ETHDEV_LOG(ERR,
4089                        "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4090                        port_id);
4091                return -EINVAL;
4092        }
4093
4094        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
4095        ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4096
4097        return ret;
4098}
4099
4100int
4101rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4102{
4103        struct rte_eth_dev *dev;
4104
4105        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4106        dev = &rte_eth_devices[port_id];
4107
4108        if (fec_capa == NULL) {
4109                RTE_ETHDEV_LOG(ERR,
4110                        "Cannot get ethdev port %u current FEC mode to NULL\n",
4111                        port_id);
4112                return -EINVAL;
4113        }
4114
4115        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
4116        return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4117}
4118
4119int
4120rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4121{
4122        struct rte_eth_dev *dev;
4123
4124        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4125        dev = &rte_eth_devices[port_id];
4126
4127        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
4128        return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4129}
4130
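/*
 * Illustrative sketch: fec_capa for rte_eth_fec_set() is a bitmask built
 * with RTE_ETH_FEC_MODE_TO_CAPA(); querying the capability count first
 * lets an application confirm FEC control is supported at all:
 *
 *	int n = rte_eth_fec_get_capability(port_id, NULL, 0);
 *	if (n > 0)
 *		ret = rte_eth_fec_set(port_id,
 *				RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_AUTO));
 */
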
4131/*
4132 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4133 * an empty spot.
4134 */
4135static int
4136eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4137{
4138        struct rte_eth_dev_info dev_info;
4139        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4140        unsigned i;
4141        int ret;
4142
4143        ret = rte_eth_dev_info_get(port_id, &dev_info);
4144        if (ret != 0)
4145                return -1;
4146
4147        for (i = 0; i < dev_info.max_mac_addrs; i++)
4148                if (memcmp(addr, &dev->data->mac_addrs[i],
4149                                RTE_ETHER_ADDR_LEN) == 0)
4150                        return i;
4151
4152        return -1;
4153}
4154
4155static const struct rte_ether_addr null_mac_addr;
4156
4157int
4158rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4159                        uint32_t pool)
4160{
4161        struct rte_eth_dev *dev;
4162        int index;
4163        uint64_t pool_mask;
4164        int ret;
4165
4166        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4167        dev = &rte_eth_devices[port_id];
4168
4169        if (addr == NULL) {
4170                RTE_ETHDEV_LOG(ERR,
4171                        "Cannot add ethdev port %u MAC address from NULL address\n",
4172                        port_id);
4173                return -EINVAL;
4174        }
4175
4176        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4177
4178        if (rte_is_zero_ether_addr(addr)) {
4179                RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4180                        port_id);
4181                return -EINVAL;
4182        }
4183        if (pool >= RTE_ETH_64_POOLS) {
4184                RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
4185                return -EINVAL;
4186        }
4187
4188        index = eth_dev_get_mac_addr_index(port_id, addr);
4189        if (index < 0) {
4190                index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4191                if (index < 0) {
4192                        RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4193                                port_id);
4194                        return -ENOSPC;
4195                }
4196        } else {
4197                pool_mask = dev->data->mac_pool_sel[index];
4198
4199                /* Check if both MAC address and pool are already there; if so, do nothing */
4200                if (pool_mask & RTE_BIT64(pool))
4201                        return 0;
4202        }
4203
4204        /* Update NIC */
4205        ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4206
4207        if (ret == 0) {
4208                /* Update address in NIC data structure */
4209                rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4210
4211                /* Update pool bitmap in NIC data structure */
4212                dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
4213        }
4214
4215        return eth_err(port_id, ret);
4216}
4217
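/*
 * Illustrative sketch (the address bytes are arbitrary but locally
 * administered and non-zero): adding a secondary unicast address to
 * pool 0:
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *	ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */
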
4218int
4219rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4220{
4221        struct rte_eth_dev *dev;
4222        int index;
4223
4224        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4225        dev = &rte_eth_devices[port_id];
4226
4227        if (addr == NULL) {
4228                RTE_ETHDEV_LOG(ERR,
4229                        "Cannot remove ethdev port %u MAC address from NULL address\n",
4230                        port_id);
4231                return -EINVAL;
4232        }
4233
4234        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4235
4236        index = eth_dev_get_mac_addr_index(port_id, addr);
4237        if (index == 0) {
4238                RTE_ETHDEV_LOG(ERR,
4239                        "Port %u: Cannot remove default MAC address\n",
4240                        port_id);
4241                return -EADDRINUSE;
4242        } else if (index < 0)
4243                return 0;  /* Do nothing if address wasn't found */
4244
4245        /* Update NIC */
4246        (*dev->dev_ops->mac_addr_remove)(dev, index);
4247
4248        /* Update address in NIC data structure */
4249        rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4250
4251        /* reset pool bitmap */
4252        dev->data->mac_pool_sel[index] = 0;
4253
4254        return 0;
4255}
4256
4257int
4258rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4259{
4260        struct rte_eth_dev *dev;
4261        int ret;
4262
4263        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4264        dev = &rte_eth_devices[port_id];
4265
4266        if (addr == NULL) {
4267                RTE_ETHDEV_LOG(ERR,
4268                        "Cannot set ethdev port %u default MAC address from NULL address\n",
4269                        port_id);
4270                return -EINVAL;
4271        }
4272
4273        if (!rte_is_valid_assigned_ether_addr(addr))
4274                return -EINVAL;
4275
4276        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4277
4278        ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4279        if (ret < 0)
4280                return ret;
4281
4282        /* Update default address in NIC data structure */
4283        rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4284
4285        return 0;
4286}
4287
4288
4289/*
4290 * Returns index into the hash MAC address array of addr. Use
4291 * 00:00:00:00:00:00 to find an empty spot.
4292 */
4293static int
4294eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4295                const struct rte_ether_addr *addr)
4296{
4297        struct rte_eth_dev_info dev_info;
4298        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4299        unsigned i;
4300        int ret;
4301
4302        ret = rte_eth_dev_info_get(port_id, &dev_info);
4303        if (ret != 0)
4304                return -1;
4305
4306        if (!dev->data->hash_mac_addrs)
4307                return -1;
4308
4309        for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4310                if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4311                        RTE_ETHER_ADDR_LEN) == 0)
4312                        return i;
4313
4314        return -1;
4315}
4316
4317int
4318rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4319                                uint8_t on)
4320{
4321        int index;
4322        int ret;
4323        struct rte_eth_dev *dev;
4324
4325        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4326        dev = &rte_eth_devices[port_id];
4327
4328        if (addr == NULL) {
4329                RTE_ETHDEV_LOG(ERR,
4330                        "Cannot set ethdev port %u unicast hash table from NULL address\n",
4331                        port_id);
4332                return -EINVAL;
4333        }
4334
4335        if (rte_is_zero_ether_addr(addr)) {
4336                RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4337                        port_id);
4338                return -EINVAL;
4339        }
4340
4341        index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4342        /* Check if it's already there, and do nothing */
4343        if ((index >= 0) && on)
4344                return 0;
4345
4346        if (index < 0) {
4347                if (!on) {
4348                        RTE_ETHDEV_LOG(ERR,
4349                                "Port %u: the MAC address was not set in UTA\n",
4350                                port_id);
4351                        return -EINVAL;
4352                }
4353
4354                index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4355                if (index < 0) {
4356                        RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4357                                port_id);
4358                        return -ENOSPC;
4359                }
4360        }
4361
4362        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4363        ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4364        if (ret == 0) {
4365                /* Update address in NIC data structure */
4366                if (on)
4367                        rte_ether_addr_copy(addr,
4368                                        &dev->data->hash_mac_addrs[index]);
4369                else
4370                        rte_ether_addr_copy(&null_mac_addr,
4371                                        &dev->data->hash_mac_addrs[index]);
4372        }
4373
4374        return eth_err(port_id, ret);
4375}
4376
4377int
4378rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4379{
4380        struct rte_eth_dev *dev;
4381
4382        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4383        dev = &rte_eth_devices[port_id];
4384
4385        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4386        return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4387                                                                       on));
4388}
4389
4390int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4391                                        uint16_t tx_rate)
4392{
4393        struct rte_eth_dev *dev;
4394        struct rte_eth_dev_info dev_info;
4395        struct rte_eth_link link;
4396        int ret;
4397
4398        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4399        dev = &rte_eth_devices[port_id];
4400
4401        ret = rte_eth_dev_info_get(port_id, &dev_info);
4402        if (ret != 0)
4403                return ret;
4404
4405        link = dev->data->dev_link;
4406
4407        if (queue_idx >= dev_info.max_tx_queues) {
4408                RTE_ETHDEV_LOG(ERR,
4409                        "Set queue rate limit: port %u: invalid queue ID=%u\n",
4410                        port_id, queue_idx);
4411                return -EINVAL;
4412        }
4413
4414        if (tx_rate > link.link_speed) {
4415                RTE_ETHDEV_LOG(ERR,
4416                        "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
4417                        tx_rate, link.link_speed);
4418                return -EINVAL;
4419        }
4420
4421        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4422        return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4423                                                        queue_idx, tx_rate));
4424}
4425
4426int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
4427                               uint8_t avail_thresh)
4428{
4429        struct rte_eth_dev *dev;
4430
4431        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4432        dev = &rte_eth_devices[port_id];
4433
4434        if (queue_id >= dev->data->nb_rx_queues) {
4435                RTE_ETHDEV_LOG(ERR,
4436                        "Set queue avail thresh: port %u: invalid queue ID=%u.\n",
4437                        port_id, queue_id);
4438                return -EINVAL;
4439        }
4440
4441        if (avail_thresh > 99) {
4442                RTE_ETHDEV_LOG(ERR,
4443                        "Set queue avail thresh: port %u: threshold should be <= 99.\n",
4444                        port_id);
4445                return -EINVAL;
4446        }
4447        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_avail_thresh_set, -ENOTSUP);
4448        return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
4449                                                             queue_id, avail_thresh));
4450}
4451
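/*
 * Illustrative sketch: arming a 50% threshold on Rx queue 0; the PMD
 * raises RTE_ETH_EVENT_RX_AVAIL_THRESH once the number of available
 * descriptors on that queue falls below half of the ring size:
 *
 *	ret = rte_eth_rx_avail_thresh_set(port_id, 0, 50);
 */
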
4452int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
4453                                 uint8_t *avail_thresh)
4454{
4455        struct rte_eth_dev *dev;
4456
4457        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4458        dev = &rte_eth_devices[port_id];
4459
4460        if (queue_id == NULL)
4461                return -EINVAL;
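        /* treat an out-of-range starting queue as queue 0 for the driver's search */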
4462        if (*queue_id >= dev->data->nb_rx_queues)
4463                *queue_id = 0;
4464
4465        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_avail_thresh_query, -ENOTSUP);
4466        return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
4467                                                             queue_id, avail_thresh));
4468}
4469
4470RTE_INIT(eth_dev_init_fp_ops)
4471{
4472        uint32_t i;
4473
4474        for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
4475                eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
4476}
4477
4478RTE_INIT(eth_dev_init_cb_lists)
4479{
4480        uint16_t i;
4481
4482        for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4483                TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4484}
4485
4486int
4487rte_eth_dev_callback_register(uint16_t port_id,
4488                        enum rte_eth_event_type event,
4489                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4490{
4491        struct rte_eth_dev *dev;
4492        struct rte_eth_dev_callback *user_cb;
4493        uint16_t next_port;
4494        uint16_t last_port;
4495
4496        if (cb_fn == NULL) {
4497                RTE_ETHDEV_LOG(ERR,
4498                        "Cannot register ethdev port %u callback from NULL\n",
4499                        port_id);
4500                return -EINVAL;
4501        }
4502
4503        if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4504                RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4505                return -EINVAL;
4506        }
4507
4508        if (port_id == RTE_ETH_ALL) {
4509                next_port = 0;
4510                last_port = RTE_MAX_ETHPORTS - 1;
4511        } else {
4512                next_port = last_port = port_id;
4513        }
4514
4515        rte_spinlock_lock(&eth_dev_cb_lock);
4516
4517        do {
4518                dev = &rte_eth_devices[next_port];
4519
4520                TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4521                        if (user_cb->cb_fn == cb_fn &&
4522                                user_cb->cb_arg == cb_arg &&
4523                                user_cb->event == event) {
4524                                break;
4525                        }
4526                }
4527
4528                /* create a new callback. */
4529                if (user_cb == NULL) {
4530                        user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4531                                sizeof(struct rte_eth_dev_callback), 0);
4532                        if (user_cb != NULL) {
4533                                user_cb->cb_fn = cb_fn;
4534                                user_cb->cb_arg = cb_arg;
4535                                user_cb->event = event;
4536                                TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4537                                                  user_cb, next);
4538                        } else {
4539                                rte_spinlock_unlock(&eth_dev_cb_lock);
4540                                rte_eth_dev_callback_unregister(port_id, event,
4541                                                                cb_fn, cb_arg);
4542                                return -ENOMEM;
4543                        }
4544
4545                }
4546        } while (++next_port <= last_port);
4547
4548        rte_spinlock_unlock(&eth_dev_cb_lock);
4549        return 0;
4550}
4551
4552int
4553rte_eth_dev_callback_unregister(uint16_t port_id,
4554                        enum rte_eth_event_type event,
4555                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4556{
4557        int ret;
4558        struct rte_eth_dev *dev;
4559        struct rte_eth_dev_callback *cb, *next;
4560        uint16_t next_port;
4561        uint16_t last_port;
4562
4563        if (cb_fn == NULL) {
4564                RTE_ETHDEV_LOG(ERR,
4565                        "Cannot unregister ethdev port %u callback from NULL\n",
4566                        port_id);
4567                return -EINVAL;
4568        }
4569
4570        if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4571                RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4572                return -EINVAL;
4573        }
4574
4575        if (port_id == RTE_ETH_ALL) {
4576                next_port = 0;
4577                last_port = RTE_MAX_ETHPORTS - 1;
4578        } else {
4579                next_port = last_port = port_id;
4580        }
4581
4582        rte_spinlock_lock(&eth_dev_cb_lock);
4583
4584        do {
4585                dev = &rte_eth_devices[next_port];
4586                ret = 0;
4587                for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4588                     cb = next) {
4589
4590                        next = TAILQ_NEXT(cb, next);
4591
4592                        if (cb->cb_fn != cb_fn || cb->event != event ||
4593                            (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4594                                continue;
4595
4596                        /*
4597                         * if this callback is not executing right now,
4598                         * then remove it.
4599                         */
4600                        if (cb->active == 0) {
4601                                TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4602                                rte_free(cb);
4603                        } else {
4604                                ret = -EAGAIN;
4605                        }
4606                }
4607        } while (++next_port <= last_port);
4608
4609        rte_spinlock_unlock(&eth_dev_cb_lock);
4610        return ret;
4611}
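
/*
 * Usage sketch: registering and unregistering a link-status callback
 * (uses only the public API; RTE_ETH_ALL registers on every port, and
 * passing (void *)-1 as cb_arg to unregister matches any argument, per
 * the match logic above):
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *			lsc_event_cb, NULL);
 *	...
 *	rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *			lsc_event_cb, (void *)-1);
 */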
4612
4613int
4614rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4615{
4616        uint32_t vec;
4617        struct rte_eth_dev *dev;
4618        struct rte_intr_handle *intr_handle;
4619        uint16_t qid;
4620        int rc;
4621
4622        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4623        dev = &rte_eth_devices[port_id];
4624
4625        if (!dev->intr_handle) {
4626                RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4627                return -ENOTSUP;
4628        }
4629
4630        intr_handle = dev->intr_handle;
4631        if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4632                RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4633                return -EPERM;
4634        }
4635
4636        for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4637                vec = rte_intr_vec_list_index_get(intr_handle, qid);
4638                rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4639                if (rc && rc != -EEXIST) {
4640                        RTE_ETHDEV_LOG(ERR,
4641                                "p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4642                                port_id, qid, op, epfd, vec);
4643                }
4644        }
4645
4646        return 0;
4647}
4648
4649int
4650rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4651{
4652        struct rte_intr_handle *intr_handle;
4653        struct rte_eth_dev *dev;
4654        unsigned int efd_idx;
4655        uint32_t vec;
4656        int fd;
4657
4658        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4659        dev = &rte_eth_devices[port_id];
4660
4661        if (queue_id >= dev->data->nb_rx_queues) {
4662                RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4663                return -1;
4664        }
4665
4666        if (!dev->intr_handle) {
4667                RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4668                return -1;
4669        }
4670
4671        intr_handle = dev->intr_handle;
4672        if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4673                RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4674                return -1;
4675        }
4676
4677        vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4678        efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4679                (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4680        fd = rte_intr_efds_index_get(intr_handle, efd_idx);
4681
4682        return fd;
4683}
4684
4685int
4686rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4687                          int epfd, int op, void *data)
4688{
4689        uint32_t vec;
4690        struct rte_eth_dev *dev;
4691        struct rte_intr_handle *intr_handle;
4692        int rc;
4693
4694        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4695        dev = &rte_eth_devices[port_id];
4696
4697        if (queue_id >= dev->data->nb_rx_queues) {
4698                RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4699                return -EINVAL;
4700        }
4701
4702        if (!dev->intr_handle) {
4703                RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4704                return -ENOTSUP;
4705        }
4706
4707        intr_handle = dev->intr_handle;
4708        if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4709                RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4710                return -EPERM;
4711        }
4712
4713        vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4714        rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4715        if (rc && rc != -EEXIST) {
4716                RTE_ETHDEV_LOG(ERR,
4717                        "p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4718                        port_id, queue_id, op, epfd, vec);
4719                return rc;
4720        }
4721
4722        return 0;
4723}
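
/*
 * Usage sketch for interrupt-driven Rx on one queue (assumes the port was
 * configured with intr_conf.rxq = 1 and started; the encoding of the
 * epoll user data is application-defined):
 *
 *	struct rte_epoll_event ev;
 *	void *data = (void *)(((uintptr_t)port_id << 16) | queue_id);
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, data);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 10) > 0) {
 *		rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *		... poll the queue with rte_eth_rx_burst() ...
 *	}
 */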
4724
4725int
4726rte_eth_dev_rx_intr_enable(uint16_t port_id,
4727                           uint16_t queue_id)
4728{
4729        struct rte_eth_dev *dev;
4730        int ret;
4731
4732        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4733        dev = &rte_eth_devices[port_id];
4734
4735        ret = eth_dev_validate_rx_queue(dev, queue_id);
4736        if (ret != 0)
4737                return ret;
4738
4739        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4740        return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
4741}
4742
4743int
4744rte_eth_dev_rx_intr_disable(uint16_t port_id,
4745                            uint16_t queue_id)
4746{
4747        struct rte_eth_dev *dev;
4748        int ret;
4749
4750        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4751        dev = &rte_eth_devices[port_id];
4752
4753        ret = eth_dev_validate_rx_queue(dev, queue_id);
4754        if (ret != 0)
4755                return ret;
4756
4757        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4758        return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
4759}
4760
4761
4762const struct rte_eth_rxtx_callback *
4763rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4764                rte_rx_callback_fn fn, void *user_param)
4765{
4766#ifndef RTE_ETHDEV_RXTX_CALLBACKS
4767        rte_errno = ENOTSUP;
4768        return NULL;
4769#endif
4770        struct rte_eth_dev *dev;
4771
4772        /* check input parameters */
4773        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4774                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4775                rte_errno = EINVAL;
4776                return NULL;
4777        }
4778        dev = &rte_eth_devices[port_id];
4779        if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4780                rte_errno = EINVAL;
4781                return NULL;
4782        }
4783        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4784
4785        if (cb == NULL) {
4786                rte_errno = ENOMEM;
4787                return NULL;
4788        }
4789
4790        cb->fn.rx = fn;
4791        cb->param = user_param;
4792
4793        rte_spinlock_lock(&eth_dev_rx_cb_lock);
4794        /* Add the callbacks in fifo order. */
4795        struct rte_eth_rxtx_callback *tail =
4796                rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4797
4798        if (!tail) {
4799                /* Stores to cb->fn and cb->param should complete before
4800                 * cb is visible to data plane.
4801                 */
4802                __atomic_store_n(
4803                        &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4804                        cb, __ATOMIC_RELEASE);
4805
4806        } else {
4807                while (tail->next)
4808                        tail = tail->next;
4809                /* Stores to cb->fn and cb->param should complete before
4810                 * cb is visible to data plane.
4811                 */
4812                __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4813        }
4814        rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4815
4816        return cb;
4817}
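
/*
 * Usage sketch: a post-Rx callback that counts received packets
 * (requires RTE_ETHDEV_RXTX_CALLBACKS; the counter and its lifetime are
 * application-managed):
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *			uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *cnt = user_param;
 *
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*cnt += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rx_count);
 */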
4818
4819const struct rte_eth_rxtx_callback *
4820rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4821                rte_rx_callback_fn fn, void *user_param)
4822{
4823#ifndef RTE_ETHDEV_RXTX_CALLBACKS
4824        rte_errno = ENOTSUP;
4825        return NULL;
4826#endif
4827        /* check input parameters */
4828        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4829                queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4830                rte_errno = EINVAL;
4831                return NULL;
4832        }
4833
4834        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4835
4836        if (cb == NULL) {
4837                rte_errno = ENOMEM;
4838                return NULL;
4839        }
4840
4841        cb->fn.rx = fn;
4842        cb->param = user_param;
4843
4844        rte_spinlock_lock(&eth_dev_rx_cb_lock);
4845        /* Add the callbacks at first position */
4846        cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4847        /* Stores to cb->fn, cb->param and cb->next should complete before
4848         * cb is visible to data plane threads.
4849         */
4850        __atomic_store_n(
4851                &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4852                cb, __ATOMIC_RELEASE);
4853        rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4854
4855        return cb;
4856}
4857
4858const struct rte_eth_rxtx_callback *
4859rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4860                rte_tx_callback_fn fn, void *user_param)
4861{
4862#ifndef RTE_ETHDEV_RXTX_CALLBACKS
4863        rte_errno = ENOTSUP;
4864        return NULL;
4865#endif
4866        struct rte_eth_dev *dev;
4867
4868        /* check input parameters */
4869        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4870                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4871                rte_errno = EINVAL;
4872                return NULL;
4873        }
4874
4875        dev = &rte_eth_devices[port_id];
4876        if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4877                rte_errno = EINVAL;
4878                return NULL;
4879        }
4880
4881        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4882
4883        if (cb == NULL) {
4884                rte_errno = ENOMEM;
4885                return NULL;
4886        }
4887
4888        cb->fn.tx = fn;
4889        cb->param = user_param;
4890
4891        rte_spinlock_lock(&eth_dev_tx_cb_lock);
4892        /* Add the callbacks in fifo order. */
4893        struct rte_eth_rxtx_callback *tail =
4894                rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4895
4896        if (!tail) {
4897                /* Stores to cb->fn and cb->param should complete before
4898                 * cb is visible to data plane.
4899                 */
4900                __atomic_store_n(
4901                        &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4902                        cb, __ATOMIC_RELEASE);
4903
4904        } else {
4905                while (tail->next)
4906                        tail = tail->next;
4907                /* Stores to cb->fn and cb->param should complete before
4908                 * cb is visible to data plane.
4909                 */
4910                __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4911        }
4912        rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4913
4914        return cb;
4915}
4916
4917int
4918rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4919                const struct rte_eth_rxtx_callback *user_cb)
4920{
4921#ifndef RTE_ETHDEV_RXTX_CALLBACKS
4922        return -ENOTSUP;
4923#endif
4924        /* Check input parameters. */
4925        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4926        if (user_cb == NULL ||
4927                        queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4928                return -EINVAL;
4929
4930        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4931        struct rte_eth_rxtx_callback *cb;
4932        struct rte_eth_rxtx_callback **prev_cb;
4933        int ret = -EINVAL;
4934
4935        rte_spinlock_lock(&eth_dev_rx_cb_lock);
4936        prev_cb = &dev->post_rx_burst_cbs[queue_id];
4937        for (; *prev_cb != NULL; prev_cb = &cb->next) {
4938                cb = *prev_cb;
4939                if (cb == user_cb) {
4940                        /* Remove the user cb from the callback list. */
4941                        __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4942                        ret = 0;
4943                        break;
4944                }
4945        }
4946        rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4947
4948        return ret;
4949}
4950
4951int
4952rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4953                const struct rte_eth_rxtx_callback *user_cb)
4954{
4955#ifndef RTE_ETHDEV_RXTX_CALLBACKS
4956        return -ENOTSUP;
4957#endif
4958        /* Check input parameters. */
4959        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4960        if (user_cb == NULL ||
4961                        queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4962                return -EINVAL;
4963
4964        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4965        int ret = -EINVAL;
4966        struct rte_eth_rxtx_callback *cb;
4967        struct rte_eth_rxtx_callback **prev_cb;
4968
4969        rte_spinlock_lock(&eth_dev_tx_cb_lock);
4970        prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4971        for (; *prev_cb != NULL; prev_cb = &cb->next) {
4972                cb = *prev_cb;
4973                if (cb == user_cb) {
4974                        /* Remove the user cb from the callback list. */
4975                        __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4976                        ret = 0;
4977                        break;
4978                }
4979        }
4980        rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4981
4982        return ret;
4983}
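
/*
 * Removal only unlinks a callback; it does not free it, since a
 * data-plane thread may still be executing it. A sketch of a safe
 * teardown (the quiescence step is application-specific):
 *
 *	if (rte_eth_remove_tx_callback(port_id, 0, cb) == 0) {
 *		... wait until no data-plane thread can still be in cb ...
 *		rte_free((void *)(uintptr_t)cb);
 *	}
 */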
4984
4985int
4986rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4987        struct rte_eth_rxq_info *qinfo)
4988{
4989        struct rte_eth_dev *dev;
4990
4991        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4992        dev = &rte_eth_devices[port_id];
4993
4994        if (queue_id >= dev->data->nb_rx_queues) {
4995                RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4996                return -EINVAL;
4997        }
4998
4999        if (qinfo == NULL) {
5000                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5001                        port_id, queue_id);
5002                return -EINVAL;
5003        }
5004
5005        if (dev->data->rx_queues == NULL ||
5006                        dev->data->rx_queues[queue_id] == NULL) {
5007                RTE_ETHDEV_LOG(ERR,
5008                               "Rx queue %"PRIu16" of device with port_id=%"
5009                               PRIu16" has not been setup\n",
5010                               queue_id, port_id);
5011                return -EINVAL;
5012        }
5013
5014        if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5015                RTE_ETHDEV_LOG(INFO,
5016                        "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5017                        queue_id, port_id);
5018                return -EINVAL;
5019        }
5020
5021        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5022
5023        memset(qinfo, 0, sizeof(*qinfo));
5024        dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5025        qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5026
5027        return 0;
5028}
5029
5030int
5031rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5032        struct rte_eth_txq_info *qinfo)
5033{
5034        struct rte_eth_dev *dev;
5035
5036        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5037        dev = &rte_eth_devices[port_id];
5038
5039        if (queue_id >= dev->data->nb_tx_queues) {
5040                RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5041                return -EINVAL;
5042        }
5043
5044        if (qinfo == NULL) {
5045                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5046                        port_id, queue_id);
5047                return -EINVAL;
5048        }
5049
5050        if (dev->data->tx_queues == NULL ||
5051                        dev->data->tx_queues[queue_id] == NULL) {
5052                RTE_ETHDEV_LOG(ERR,
5053                               "Tx queue %"PRIu16" of device with port_id=%"
5054                               PRIu16" has not been setup\n",
5055                               queue_id, port_id);
5056                return -EINVAL;
5057        }
5058
5059        if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5060                RTE_ETHDEV_LOG(INFO,
5061                        "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5062                        queue_id, port_id);
5063                return -EINVAL;
5064        }
5065
5066        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5067
5068        memset(qinfo, 0, sizeof(*qinfo));
5069        dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5070        qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5071
5072        return 0;
5073}
5074
5075int
5076rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5077                          struct rte_eth_burst_mode *mode)
5078{
5079        struct rte_eth_dev *dev;
5080
5081        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5082        dev = &rte_eth_devices[port_id];
5083
5084        if (queue_id >= dev->data->nb_rx_queues) {
5085                RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5086                return -EINVAL;
5087        }
5088
5089        if (mode == NULL) {
5090                RTE_ETHDEV_LOG(ERR,
5091                        "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5092                        port_id, queue_id);
5093                return -EINVAL;
5094        }
5095
5096        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5097        memset(mode, 0, sizeof(*mode));
5098        return eth_err(port_id,
5099                       dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5100}
5101
5102int
5103rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5104                          struct rte_eth_burst_mode *mode)
5105{
5106        struct rte_eth_dev *dev;
5107
5108        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5109        dev = &rte_eth_devices[port_id];
5110
5111        if (queue_id >= dev->data->nb_tx_queues) {
5112                RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5113                return -EINVAL;
5114        }
5115
5116        if (mode == NULL) {
5117                RTE_ETHDEV_LOG(ERR,
5118                        "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5119                        port_id, queue_id);
5120                return -EINVAL;
5121        }
5122
5123        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5124        memset(mode, 0, sizeof(*mode));
5125        return eth_err(port_id,
5126                       dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5127}
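
/*
 * Usage sketch: report the burst mode selected by the driver for a queue
 * (assuming the queue has been set up; mode.info is a NUL-terminated
 * description filled in by the driver):
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
 *		printf("port %u rxq 0 burst mode: %s\n", port_id, mode.info);
 */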
5128
5129int
5130rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5131                struct rte_power_monitor_cond *pmc)
5132{
5133        struct rte_eth_dev *dev;
5134
5135        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5136        dev = &rte_eth_devices[port_id];
5137
5138        if (queue_id >= dev->data->nb_rx_queues) {
5139                RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5140                return -EINVAL;
5141        }
5142
5143        if (pmc == NULL) {
5144                RTE_ETHDEV_LOG(ERR,
5145                        "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5146                        port_id, queue_id);
5147                return -EINVAL;
5148        }
5149
5150        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
5151        return eth_err(port_id,
5152                dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5153}
5154
5155int
5156rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5157                             struct rte_ether_addr *mc_addr_set,
5158                             uint32_t nb_mc_addr)
5159{
5160        struct rte_eth_dev *dev;
5161
5162        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5163        dev = &rte_eth_devices[port_id];
5164
5165        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5166        return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5167                                                mc_addr_set, nb_mc_addr));
5168}
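
/*
 * Usage sketch: subscribe to one multicast group address, and later flush
 * the list by setting zero entries (illustrative address):
 *
 *	struct rte_ether_addr mc[] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
 *	...
 *	rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);
 */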
5169
5170int
5171rte_eth_timesync_enable(uint16_t port_id)
5172{
5173        struct rte_eth_dev *dev;
5174
5175        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5176        dev = &rte_eth_devices[port_id];
5177
5178        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5179        return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5180}
5181
5182int
5183rte_eth_timesync_disable(uint16_t port_id)
5184{
5185        struct rte_eth_dev *dev;
5186
5187        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5188        dev = &rte_eth_devices[port_id];
5189
5190        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5191        return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5192}
5193
5194int
5195rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5196                                   uint32_t flags)
5197{
5198        struct rte_eth_dev *dev;
5199
5200        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5201        dev = &rte_eth_devices[port_id];
5202
5203        if (timestamp == NULL) {
5204                RTE_ETHDEV_LOG(ERR,
5205                        "Cannot read ethdev port %u Rx timestamp to NULL\n",
5206                        port_id);
5207                return -EINVAL;
5208        }
5209
5210        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5211        return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5212                                (dev, timestamp, flags));
5213}
5214
5215int
5216rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5217                                   struct timespec *timestamp)
5218{
5219        struct rte_eth_dev *dev;
5220
5221        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5222        dev = &rte_eth_devices[port_id];
5223
5224        if (timestamp == NULL) {
5225                RTE_ETHDEV_LOG(ERR,
5226                        "Cannot read ethdev port %u Tx timestamp to NULL\n",
5227                        port_id);
5228                return -EINVAL;
5229        }
5230
5231        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5232        return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5233                                (dev, timestamp));
5234}
5235
5236int
5237rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5238{
5239        struct rte_eth_dev *dev;
5240
5241        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5242        dev = &rte_eth_devices[port_id];
5243
5244        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5245        return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5246}
5247
5248int
5249rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5250{
5251        struct rte_eth_dev *dev;
5252
5253        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5254        dev = &rte_eth_devices[port_id];
5255
5256        if (timestamp == NULL) {
5257                RTE_ETHDEV_LOG(ERR,
5258                        "Cannot read ethdev port %u timesync time to NULL\n",
5259                        port_id);
5260                return -EINVAL;
5261        }
5262
5263        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5264        return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5265                                                                timestamp));
5266}
5267
5268int
5269rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5270{
5271        struct rte_eth_dev *dev;
5272
5273        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5274        dev = &rte_eth_devices[port_id];
5275
5276        if (timestamp == NULL) {
5277                RTE_ETHDEV_LOG(ERR,
5278                        "Cannot write ethdev port %u timesync from NULL time\n",
5279                        port_id);
5280                return -EINVAL;
5281        }
5282
5283        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5284        return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5285                                                                timestamp));
5286}
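
/*
 * Usage sketch of the timesync API (assuming driver support; the -1000 ns
 * adjustment is illustrative):
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &ts);
 *	rte_eth_timesync_adjust_time(port_id, -1000);
 *	rte_eth_timesync_disable(port_id);
 */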
5287
5288int
5289rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5290{
5291        struct rte_eth_dev *dev;
5292
5293        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5294        dev = &rte_eth_devices[port_id];
5295
5296        if (clock == NULL) {
5297                RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5298                        port_id);
5299                return -EINVAL;
5300        }
5301
5302        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5303        return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5304}
5305
5306int
5307rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5308{
5309        struct rte_eth_dev *dev;
5310
5311        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5312        dev = &rte_eth_devices[port_id];
5313
5314        if (info == NULL) {
5315                RTE_ETHDEV_LOG(ERR,
5316                        "Cannot get ethdev port %u register info to NULL\n",
5317                        port_id);
5318                return -EINVAL;
5319        }
5320
5321        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5322        return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5323}
5324
5325int
5326rte_eth_dev_get_eeprom_length(uint16_t port_id)
5327{
5328        struct rte_eth_dev *dev;
5329
5330        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5331        dev = &rte_eth_devices[port_id];
5332
5333        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5334        return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5335}
5336
5337int
5338rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5339{
5340        struct rte_eth_dev *dev;
5341
5342        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5343        dev = &rte_eth_devices[port_id];
5344
5345        if (info == NULL) {
5346                RTE_ETHDEV_LOG(ERR,
5347                        "Cannot get ethdev port %u EEPROM info to NULL\n",
5348                        port_id);
5349                return -EINVAL;
5350        }
5351
5352        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5353        return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5354}
5355
5356int
5357rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5358{
5359        struct rte_eth_dev *dev;
5360
5361        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5362        dev = &rte_eth_devices[port_id];
5363
5364        if (info == NULL) {
5365                RTE_ETHDEV_LOG(ERR,
5366                        "Cannot set ethdev port %u EEPROM from NULL info\n",
5367                        port_id);
5368                return -EINVAL;
5369        }
5370
5371        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5372        return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5373}
5374
5375int
5376rte_eth_dev_get_module_info(uint16_t port_id,
5377                            struct rte_eth_dev_module_info *modinfo)
5378{
5379        struct rte_eth_dev *dev;
5380
5381        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5382        dev = &rte_eth_devices[port_id];
5383
5384        if (modinfo == NULL) {
5385                RTE_ETHDEV_LOG(ERR,
5386                        "Cannot get ethdev port %u EEPROM module info to NULL\n",
5387                        port_id);
5388                return -EINVAL;
5389        }
5390
5391        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5392        return eth_err(port_id, (*dev->dev_ops->get_module_info)(dev, modinfo));
5393}
5394
5395int
5396rte_eth_dev_get_module_eeprom(uint16_t port_id,
5397                              struct rte_dev_eeprom_info *info)
5398{
5399        struct rte_eth_dev *dev;
5400
5401        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5402        dev = &rte_eth_devices[port_id];
5403
5404        if (info == NULL) {
5405                RTE_ETHDEV_LOG(ERR,
5406                        "Cannot get ethdev port %u module EEPROM info to NULL\n",
5407                        port_id);
5408                return -EINVAL;
5409        }
5410
5411        if (info->data == NULL) {
5412                RTE_ETHDEV_LOG(ERR,
5413                        "Cannot get ethdev port %u module EEPROM data to NULL\n",
5414                        port_id);
5415                return -EINVAL;
5416        }
5417
5418        if (info->length == 0) {
5419                RTE_ETHDEV_LOG(ERR,
5420                        "Cannot get ethdev port %u module EEPROM to data with zero size\n",
5421                        port_id);
5422                return -EINVAL;
5423        }
5424
5425        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5426        return eth_err(port_id, (*dev->dev_ops->get_module_eeprom)(dev, info));
5427}
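
/*
 * Usage sketch: read the beginning of a plugged module's EEPROM
 * (assuming an SFP/QSFP port with driver support; the buffer size is
 * illustrative):
 *
 *	struct rte_eth_dev_module_info minfo;
 *	struct rte_dev_eeprom_info einfo;
 *	uint8_t buf[256];
 *
 *	if (rte_eth_dev_get_module_info(port_id, &minfo) == 0) {
 *		einfo.offset = 0;
 *		einfo.length = RTE_MIN(minfo.eeprom_len,
 *				(uint32_t)sizeof(buf));
 *		einfo.data = buf;
 *		rte_eth_dev_get_module_eeprom(port_id, &einfo);
 *	}
 */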
5428
5429int
5430rte_eth_dev_get_dcb_info(uint16_t port_id,
5431                             struct rte_eth_dcb_info *dcb_info)
5432{
5433        struct rte_eth_dev *dev;
5434
5435        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5436        dev = &rte_eth_devices[port_id];
5437
5438        if (dcb_info == NULL) {
5439                RTE_ETHDEV_LOG(ERR,
5440                        "Cannot get ethdev port %u DCB info to NULL\n",
5441                        port_id);
5442                return -EINVAL;
5443        }
5444
5445        memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5446
5447        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5448        return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5449}
5450
5451static void
5452eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5453                const struct rte_eth_desc_lim *desc_lim)
5454{
5455        if (desc_lim->nb_align != 0)
5456                *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5457
5458        if (desc_lim->nb_max != 0)
5459                *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5460
5461        *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5462}
5463
5464int
5465rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5466                                 uint16_t *nb_rx_desc,
5467                                 uint16_t *nb_tx_desc)
5468{
5469        struct rte_eth_dev_info dev_info;
5470        int ret;
5471
5472        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5473
5474        ret = rte_eth_dev_info_get(port_id, &dev_info);
5475        if (ret != 0)
5476                return ret;
5477
5478        if (nb_rx_desc != NULL)
5479                eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5480
5481        if (nb_tx_desc != NULL)
5482                eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5483
5484        return 0;
5485}
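
/*
 * Usage sketch: clamp requested ring sizes to the device limits before
 * setting up the queues (mb_pool is an application-created mempool):
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
 *			NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, rte_socket_id(), NULL);
 */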
5486
5487int
5488rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5489                                   struct rte_eth_hairpin_cap *cap)
5490{
5491        struct rte_eth_dev *dev;
5492
5493        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5494        dev = &rte_eth_devices[port_id];
5495
5496        if (cap == NULL) {
5497                RTE_ETHDEV_LOG(ERR,
5498                        "Cannot get ethdev port %u hairpin capability to NULL\n",
5499                        port_id);
5500                return -EINVAL;
5501        }
5502
5503        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5504        memset(cap, 0, sizeof(*cap));
5505        return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5506}
5507
5508int
5509rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5510{
5511        struct rte_eth_dev *dev;
5512
5513        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5514        dev = &rte_eth_devices[port_id];
5515
5516        if (pool == NULL) {
5517                RTE_ETHDEV_LOG(ERR,
5518                        "Cannot test ethdev port %u mempool operation from NULL pool\n",
5519                        port_id);
5520                return -EINVAL;
5521        }
5522
5523        if (*dev->dev_ops->pool_ops_supported == NULL)
5524                return 1; /* all pools are supported */
5525
5526        return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5527}
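
/*
 * Usage sketch: check whether a given mempool ops name is usable with a
 * port before creating the Rx mempool ("ring_mp_mc" is illustrative):
 *
 *	if (rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") < 0)
 *		... pick a different mempool ops name ...
 */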
5528
5529static int
5530eth_dev_handle_port_list(const char *cmd __rte_unused,
5531                const char *params __rte_unused,
5532                struct rte_tel_data *d)
5533{
5534        int port_id;
5535
5536        rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5537        RTE_ETH_FOREACH_DEV(port_id)
5538                rte_tel_data_add_array_int(d, port_id);
5539        return 0;
5540}
5541
5542static void
5543eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5544                const char *stat_name)
5545{
5546        int q;
5547        struct rte_tel_data *q_data = rte_tel_data_alloc();
5548        if (q_data == NULL)
5549                return;
5550        rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5551        for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5552                rte_tel_data_add_array_u64(q_data, q_stats[q]);
5553        rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5554}
5555
5556#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5557
5558static int
5559eth_dev_handle_port_stats(const char *cmd __rte_unused,
5560                const char *params,
5561                struct rte_tel_data *d)
5562{
5563        struct rte_eth_stats stats;
5564        int port_id, ret;
5565
5566        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5567                return -1;
5568
5569        port_id = atoi(params);
5570        if (!rte_eth_dev_is_valid_port(port_id))
5571                return -1;
5572
5573        ret = rte_eth_stats_get(port_id, &stats);
5574        if (ret < 0)
5575                return -1;
5576
5577        rte_tel_data_start_dict(d);
5578        ADD_DICT_STAT(stats, ipackets);
5579        ADD_DICT_STAT(stats, opackets);
5580        ADD_DICT_STAT(stats, ibytes);
5581        ADD_DICT_STAT(stats, obytes);
5582        ADD_DICT_STAT(stats, imissed);
5583        ADD_DICT_STAT(stats, ierrors);
5584        ADD_DICT_STAT(stats, oerrors);
5585        ADD_DICT_STAT(stats, rx_nombuf);
5586        eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5587        eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5588        eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5589        eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5590        eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
5591
5592        return 0;
5593}
5594
5595static int
5596eth_dev_handle_port_xstats(const char *cmd __rte_unused,
5597                const char *params,
5598                struct rte_tel_data *d)
5599{
5600        struct rte_eth_xstat *eth_xstats;
5601        struct rte_eth_xstat_name *xstat_names;
5602        int port_id, num_xstats;
5603        int i, ret;
5604        char *end_param;
5605
5606        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5607                return -1;
5608
5609        port_id = strtoul(params, &end_param, 0);
5610        if (*end_param != '\0')
5611                RTE_ETHDEV_LOG(NOTICE,
5612                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
5613        if (!rte_eth_dev_is_valid_port(port_id))
5614                return -1;
5615
5616        num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5617        if (num_xstats < 0)
5618                return -1;
5619
5620        /* use one malloc for both names and stats */
5621        eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5622                        sizeof(struct rte_eth_xstat_name)) * num_xstats);
5623        if (eth_xstats == NULL)
5624                return -1;
5625        xstat_names = (void *)&eth_xstats[num_xstats];
5626
5627        ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5628        if (ret < 0 || ret > num_xstats) {
5629                free(eth_xstats);
5630                return -1;
5631        }
5632
5633        ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5634        if (ret < 0 || ret > num_xstats) {
5635                free(eth_xstats);
5636                return -1;
5637        }
5638
5639        rte_tel_data_start_dict(d);
5640        for (i = 0; i < num_xstats; i++)
5641                rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5642                                eth_xstats[i].value);
5643        free(eth_xstats);
5644        return 0;
5645}
5646
5647static int
5648eth_dev_handle_port_link_status(const char *cmd __rte_unused,
5649                const char *params,
5650                struct rte_tel_data *d)
5651{
5652        static const char *status_str = "status";
5653        int ret, port_id;
5654        struct rte_eth_link link;
5655        char *end_param;
5656
5657        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5658                return -1;
5659
5660        port_id = strtoul(params, &end_param, 0);
5661        if (*end_param != '\0')
5662                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
5663                        "Extra parameters passed to ethdev telemetry command, ignoring");
5664        if (!rte_eth_dev_is_valid_port(port_id))
5665                return -1;
5666
5667        ret = rte_eth_link_get_nowait(port_id, &link);
5668        if (ret < 0)
5669                return -1;
5670
5671        rte_tel_data_start_dict(d);
5672        if (!link.link_status) {
5673                rte_tel_data_add_dict_string(d, status_str, "DOWN");
5674                return 0;
5675        }
5676        rte_tel_data_add_dict_string(d, status_str, "UP");
5677        rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5678        rte_tel_data_add_dict_string(d, "duplex",
5679                        (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
5680                                "full-duplex" : "half-duplex");
5681        return 0;
5682}
5683
5684static int
5685eth_dev_handle_port_info(const char *cmd __rte_unused,
5686                const char *params,
5687                struct rte_tel_data *d)
5688{
5689        struct rte_tel_data *rxq_state, *txq_state;
5690        char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
5691        struct rte_eth_dev *eth_dev;
5692        char *end_param;
5693        int port_id, i;
5694
5695        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5696                return -1;
5697
5698        port_id = strtoul(params, &end_param, 0);
5699        if (*end_param != '\0')
5700                RTE_ETHDEV_LOG(NOTICE,
5701                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
5702
5703        if (!rte_eth_dev_is_valid_port(port_id))
5704                return -EINVAL;
5705
5706        eth_dev = &rte_eth_devices[port_id];
5707
5708        rxq_state = rte_tel_data_alloc();
5709        if (!rxq_state)
5710                return -ENOMEM;
5711
5712        txq_state = rte_tel_data_alloc();
5713        if (!txq_state) {
5714                rte_tel_data_free(rxq_state);
5715                return -ENOMEM;
5716        }
5717
5718        rte_tel_data_start_dict(d);
5719        rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
5720        rte_tel_data_add_dict_int(d, "state", eth_dev->state);
5721        rte_tel_data_add_dict_int(d, "nb_rx_queues",
5722                        eth_dev->data->nb_rx_queues);
5723        rte_tel_data_add_dict_int(d, "nb_tx_queues",
5724                        eth_dev->data->nb_tx_queues);
5725        rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
5726        rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
5727        rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
5728                        eth_dev->data->min_rx_buf_size);
5729        rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
5730                        eth_dev->data->rx_mbuf_alloc_failed);
5731        rte_ether_format_addr(mac_addr, sizeof(mac_addr),
5732                        eth_dev->data->mac_addrs);
5733        rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
5734        rte_tel_data_add_dict_int(d, "promiscuous",
5735                        eth_dev->data->promiscuous);
5736        rte_tel_data_add_dict_int(d, "scattered_rx",
5737                        eth_dev->data->scattered_rx);
5738        rte_tel_data_add_dict_int(d, "all_multicast",
5739                        eth_dev->data->all_multicast);
5740        rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
5741        rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
5742        rte_tel_data_add_dict_int(d, "dev_configured",
5743                        eth_dev->data->dev_configured);
5744
5745        rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
5746        for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
5747                rte_tel_data_add_array_int(rxq_state,
5748                                eth_dev->data->rx_queue_state[i]);
5749
5750        rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
5751        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
5752                rte_tel_data_add_array_int(txq_state,
5753                                eth_dev->data->tx_queue_state[i]);
5754
5755        rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
5756        rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
5757        rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
5758        rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
5759        rte_tel_data_add_dict_int(d, "rx_offloads",
5760                        eth_dev->data->dev_conf.rxmode.offloads);
5761        rte_tel_data_add_dict_int(d, "tx_offloads",
5762                        eth_dev->data->dev_conf.txmode.offloads);
5763        rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
5764                        eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
5765
5766        return 0;
5767}
5768
5769int
5770rte_eth_representor_info_get(uint16_t port_id,
5771                             struct rte_eth_representor_info *info)
5772{
5773        struct rte_eth_dev *dev;
5774
5775        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5776        dev = &rte_eth_devices[port_id];
5777
5778        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
5779        return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
5780}
5781
5782int
5783rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
5784{
5785        struct rte_eth_dev *dev;
5786
5787        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5788        dev = &rte_eth_devices[port_id];
5789
5790        if (dev->data->dev_configured != 0) {
5791                RTE_ETHDEV_LOG(ERR,
5792                        "The port (ID=%"PRIu16") is already configured\n",
5793                        port_id);
5794                return -EBUSY;
5795        }
5796
5797        if (features == NULL) {
5798                RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
5799                return -EINVAL;
5800        }
5801
5802        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
5803        return eth_err(port_id,
5804                       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
5805}
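
/*
 * Usage sketch: negotiate Rx metadata delivery before the first
 * rte_eth_dev_configure() call; the driver clears the bits it cannot
 * deliver:
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			RTE_ETH_RX_METADATA_USER_MARK;
 *
 *	if (rte_eth_rx_metadata_negotiate(port_id, &features) == 0 &&
 *			(features & RTE_ETH_RX_METADATA_USER_MARK) == 0)
 *		... MARK will not be delivered in received mbufs ...
 */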
5806
5807int
5808rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5809                struct rte_eth_ip_reassembly_params *reassembly_capa)
5810{
5811        struct rte_eth_dev *dev;
5812
5813        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5814        dev = &rte_eth_devices[port_id];
5815
5816        if (dev->data->dev_configured == 0) {
5817                RTE_ETHDEV_LOG(ERR,
5818                        "Device with port_id=%u is not configured; "
5819                        "cannot get IP reassembly capability\n",
5820                        port_id);
5821                return -EINVAL;
5822        }
5823
5824        if (reassembly_capa == NULL) {
5825                RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
5826                return -EINVAL;
5827        }
5828
5829        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_capability_get,
5830                                -ENOTSUP);
5831        memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
5832
5833        return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
5834                                        (dev, reassembly_capa));
5835}
5836
5837int
5838rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5839                struct rte_eth_ip_reassembly_params *conf)
5840{
5841        struct rte_eth_dev *dev;
5842
5843        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5844        dev = &rte_eth_devices[port_id];
5845
5846        if (dev->data->dev_configured == 0) {
5847                RTE_ETHDEV_LOG(ERR,
5848                        "Device with port_id=%u is not configured; "
5849                        "cannot get IP reassembly configuration\n",
5850                        port_id);
5851                return -EINVAL;
5852        }
5853
5854        if (conf == NULL) {
5855                RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
5856                return -EINVAL;
5857        }
5858
5859        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_get,
5860                                -ENOTSUP);
5861        memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
5862        return eth_err(port_id,
5863                       (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
5864}
5865
5866int
5867rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5868                const struct rte_eth_ip_reassembly_params *conf)
5869{
5870        struct rte_eth_dev *dev;
5871
5872        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5873        dev = &rte_eth_devices[port_id];
5874
5875        if (dev->data->dev_configured == 0) {
5876                RTE_ETHDEV_LOG(ERR,
5877                        "Device with port_id=%u is not configured; "
5878                        "cannot set IP reassembly configuration\n",
5879                        port_id);
5880                return -EINVAL;
5881        }
5882
5883        if (dev->data->dev_started != 0) {
5884                RTE_ETHDEV_LOG(ERR,
5885                        "Device with port_id=%u is started; "
5886                        "cannot configure IP reassembly params\n",
5887                        port_id);
5888                return -EINVAL;
5889        }
5890
5891        if (conf == NULL) {
5892                RTE_ETHDEV_LOG(ERR,
5893                                "Invalid IP reassembly configuration (NULL)\n");
5894                return -EINVAL;
5895        }
5896
5897        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_set,
5898                                -ENOTSUP);
5899        return eth_err(port_id,
5900                       (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
5901}
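
/*
 * Usage sketch: enable IP reassembly within the device capabilities,
 * after rte_eth_dev_configure() and before rte_eth_dev_start() (the
 * 500 ms timeout is illustrative):
 *
 *	struct rte_eth_ip_reassembly_params capa, conf;
 *
 *	if (rte_eth_ip_reassembly_capability_get(port_id, &capa) == 0) {
 *		conf.timeout_ms = RTE_MIN(capa.timeout_ms, 500u);
 *		conf.max_frags = capa.max_frags;
 *		conf.flags = capa.flags;
 *		rte_eth_ip_reassembly_conf_set(port_id, &conf);
 *	}
 */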
5902
5903int
5904rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
5905{
5906        struct rte_eth_dev *dev;
5907
5908        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5909        dev = &rte_eth_devices[port_id];
5910
5911        if (file == NULL) {
5912                RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
5913                return -EINVAL;
5914        }
5915
5916        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_dev_priv_dump, -ENOTSUP);
5917        return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
5918}
5919
5920RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
5921
5922RTE_INIT(ethdev_init_telemetry)
5923{
5924        rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
5925                        "Returns list of available ethdev ports. Takes no parameters");
5926        rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
5927                        "Returns the common stats for a port. Parameters: int port_id");
5928        rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
5929                        "Returns the extended stats for a port. Parameters: int port_id");
5930        rte_telemetry_register_cmd("/ethdev/link_status",
5931                        eth_dev_handle_port_link_status,
5932                        "Returns the link status for a port. Parameters: int port_id");
5933        rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
5934                        "Returns the device info for a port. Parameters: int port_id");
5935        rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom,
5936                        "Returns module EEPROM info with SFF specs. Parameters: int port_id");
5937}
5938