dpdk/drivers/net/null/rte_eth_null.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"
#define ETH_NULL_PACKET_NO_RX_ARG       "no-rx"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        ETH_NULL_PACKET_NO_RX_ARG,
        NULL
};

struct pmd_internals;

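/*
 * Per-queue state, shared by the Rx and Tx paths: Rx queues allocate
 * mbufs from mb_pool, "copy" mode uses dummy_packet as the payload
 * source (Rx) or sink (Tx), and rx_pkts/tx_pkts count packets per queue.
 */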
struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
};

struct pmd_options {
        unsigned int packet_copy;
        unsigned int packet_size;
        unsigned int no_rx;
};

struct pmd_internals {
        unsigned int packet_size;
        unsigned int packet_copy;
        unsigned int no_rx;
        uint16_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        struct rte_ether_addr eth_addr;
        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_FIXED,
};

RTE_LOG_REGISTER(eth_null_logtype, pmd.net.null, NOTICE);

#define PMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, eth_null_logtype, \
                "%s(): " fmt "\n", __func__, ##args)

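/*
 * Rx burst, default mode: allocate nb_bufs mbufs from the queue's
 * mempool and present them as received packets of the configured
 * size. Nothing is written into the buffers.
 */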
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

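/*
 * Rx burst, "copy" mode: as eth_null_rx(), but also copy packet_size
 * bytes from the queue's dummy packet into each mbuf, so a payload
 * copy is included in the measured cost.
 */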
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

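/* Rx burst, "no-rx" mode: never returns any packets. */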
static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
                uint16_t nb_bufs __rte_unused)
{
        return 0;
}

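/* Tx burst, default mode: drop every packet by freeing its mbuf. */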
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

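/*
 * Tx burst, "copy" mode: copy packet_size bytes out of each mbuf into
 * the queue's dummy packet before freeing it.
 */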
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

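/*
 * Rx queue setup: record the mempool and allocate the per-queue dummy
 * packet that eth_null_copy_rx() uses as its copy source.
 */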
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned int packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

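/*
 * Tx queue setup: allocate the per-queue dummy packet that
 * eth_null_copy_tx() uses as its copy destination.
 */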
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned int packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
        return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

        return 0;
}

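/*
 * Sum the per-queue counters into the ethdev stats; per-queue figures
 * are reported for at most RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 */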
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned int i, num_stats;
        unsigned long rx_total = 0, tx_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return -EINVAL;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;

        return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned int i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return -EINVAL;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
                internal->tx_null_queues[i].tx_pkts.cnt = 0;

        return 0;
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

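/*
 * The RSS configuration below is stored and read back, but never
 * applied to traffic; it lets applications that drive the RSS API run
 * unmodified on top of the null device.
 */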
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused struct rte_ether_addr *addr)
{
        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .mtu_set = eth_mtu_set,
        .link_update = eth_link_update,
        .mac_addr_set = eth_mac_address_set,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

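/*
 * Allocate the ethdev, fill the private data from the parsed options,
 * and select the Rx/Tx burst functions that match the requested mode
 * (copy, no-rx, or plain alloc/free).
 */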
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
        const unsigned int nb_rx_queues = 1;
        const unsigned int nb_tx_queues = 1;
        struct rte_eth_dev_data *data;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
                dev->device.numa_node);

        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
        if (!eth_dev)
                return -ENOMEM;

        /* Now put it all together:
         * - store queue data in internals,
         * - store numa_node info in ethdev data,
         * - point eth_dev_data to internals,
         * - point the eth_dev structure to the new eth_dev_data structure.
         *
         * NOTE: we'll replace the data element of the originally allocated
         * eth_dev, so the nulls are local per-process.
         */

        internals = eth_dev->data->dev_private;
        internals->packet_size = args->packet_size;
        internals->packet_copy = args->packet_copy;
        internals->no_rx = args->no_rx;
        internals->port_id = eth_dev->data->port_id;
        rte_eth_random_addr(internals->eth_addr.addr_bytes);

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key, 40);

        data = eth_dev->data;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->eth_addr;
        data->promiscuous = 1;
        data->all_multicast = 1;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        if (internals->packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else if (internals->no_rx) {
                eth_dev->rx_pkt_burst = eth_null_no_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        rte_eth_dev_probing_finish(eth_dev);
        return 0;
}

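/*
 * kvargs handlers: parse the "size", "copy" and "no-rx" devargs into
 * struct pmd_options.
 */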
static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned int)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned int)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int no_rx;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        no_rx = (unsigned int)strtoul(a, NULL, 0);
        if (no_rx != 0 && no_rx != 1)
                return -1;

        *(unsigned int *)extra_args = no_rx;
        return 0;
}

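/*
 * Probe: a secondary process attaches to the port created by the
 * primary and picks its burst functions from the shared private data;
 * the primary parses the devargs and creates a new port.
 */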
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        struct pmd_options args = {
                .packet_copy = default_packet_copy,
                .packet_size = default_packet_size,
                .no_rx = default_no_rx,
        };
        struct rte_kvargs *kvlist = NULL;
        struct rte_eth_dev *eth_dev;
        int ret;

        if (!dev)
                return -EINVAL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);
        PMD_LOG(INFO, "Initializing pmd_null for %s", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                struct pmd_internals *internals;

                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        PMD_LOG(ERR, "Failed to probe %s", name);
                        return -1;
                }
                /* TODO: request info from primary to set up Rx and Tx */
                eth_dev->dev_ops = &ops;
                eth_dev->device = &dev->device;
                internals = eth_dev->data->dev_private;
                if (internals->packet_copy) {
                        eth_dev->rx_pkt_burst = eth_null_copy_rx;
                        eth_dev->tx_pkt_burst = eth_null_copy_tx;
                } else if (internals->no_rx) {
                        eth_dev->rx_pkt_burst = eth_null_no_rx;
                        eth_dev->tx_pkt_burst = eth_null_tx;
                } else {
                        eth_dev->rx_pkt_burst = eth_null_rx;
                        eth_dev->tx_pkt_burst = eth_null_tx;
                }
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                ret = rte_kvargs_process(kvlist,
                                ETH_NULL_PACKET_SIZE_ARG,
                                &get_packet_size_arg, &args.packet_size);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                ETH_NULL_PACKET_COPY_ARG,
                                &get_packet_copy_arg, &args.packet_copy);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                ETH_NULL_PACKET_NO_RX_ARG,
                                &get_packet_no_rx_arg, &args.no_rx);
                if (ret < 0)
                        goto free_kvlist;

                if (args.no_rx && args.packet_copy) {
                        PMD_LOG(ERR,
                                "The %s and %s arguments cannot be used at the same time",
                                ETH_NULL_PACKET_COPY_ARG,
                                ETH_NULL_PACKET_NO_RX_ARG);
                        /* ret would otherwise still hold 0 from the last
                         * successful rte_kvargs_process() call
                         */
                        ret = -EINVAL;
                        goto free_kvlist;
                }
        }

        PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s", args.packet_size,
                        args.packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(dev, &args);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

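/*
 * Remove: release the port. mac_addrs points into dev_private, so it
 * is cleared first to keep rte_eth_dev_release_port() from freeing it
 * separately.
 */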
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (!dev)
                return -EINVAL;

        PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return -1;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                /* mac_addrs must not be freed alone because it is part
                 * of dev_private
                 */
                eth_dev->data->mac_addrs = NULL;

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

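/*
 * Registration. The driver is instantiated through EAL devargs; an
 * illustrative invocation (values are examples only):
 *   testpmd --vdev=net_null0,size=128,copy=1 -- -i
 */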
static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int> "
        ETH_NULL_PACKET_NO_RX_ARG "=0|1");