dpdk/app/test-pmd/testpmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif
#ifdef RTE_NET_BOND
#include <rte_eth_bond.h>
#endif
#ifdef RTE_NET_MLX5
#include "mlx5_testpmd.h"
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
/*
 * Zone size with the malloc overhead (max of debug and release variants)
 * must fit into the smallest supported hugepage size (2M),
 * so that an IOVA-contiguous zone of this size can always be allocated
 * if there are free 2M hugepages.
 */
#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
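
/*
 * Worked example (added note, assuming the common 64-byte cache line):
 * EXTBUF_ZONE_SIZE = 2097152 - 4 * 64 = 2096896 bytes, i.e. a single
 * 2M hugepage with 256 bytes left over for allocator overhead.
 */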

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the main core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */
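
/*
 * Illustrative usage (added note, not from the original source; check the
 * testpmd user guide for the exact option syntax): per-port sockets can be
 * pinned on the command line, e.g.
 *   dpdk-testpmd -l 0-3 -n 4 -- --numa --port-numa-config="(0,0),(1,1)"
 * to place port 0 memory on socket 0 and port 1 memory on socket 1.
 */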

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
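
/*
 * Note (added for clarity): the allocation type is normally selected via
 * the --mp-alloc command-line option; the MP_ALLOC_XMEM_HUGE and
 * MP_ALLOC_XBUF variants are handled in mbuf_pool_create() below.
 */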

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by each port
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by each port
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
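
/*
 * Illustrative example of the invariants above (added note): with 8
 * enabled lcores (one of them the main lcore), nb_lcores and
 * nb_cfg_lcores start at 7 and "set nb-cores 4" makes nb_fwd_lcores = 4,
 * so nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores holds.
 */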

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
        &io_fwd_engine,
        &mac_fwd_engine,
        &mac_swap_engine,
        &flow_gen_engine,
        &rx_only_engine,
        &tx_only_engine,
        &csum_fwd_engine,
        &icmp_echo_engine,
        &noisy_vnf_engine,
        &five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
        &ieee1588_fwd_engine,
#endif
        &shared_rxq_engine,
        NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
        DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * In a container it is not possible to terminate a process running with the
 * 'stats-period' option from the keyboard. Set a flag to exit the stats
 * period loop after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;
uint8_t cl_quit; /* Quit testpmd from cmdline. */

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

/*
 * Configuration of packet segments used to scatter received packets
 * if any of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
        TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
        [RTE_ETH_EVENT_UNKNOWN] = "unknown",
        [RTE_ETH_EVENT_INTR_LSC] = "link state change",
        [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
        [RTE_ETH_EVENT_INTR_RESET] = "reset",
        [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
        [RTE_ETH_EVENT_IPSEC] = "IPsec",
        [RTE_ETH_EVENT_MACSEC] = "MACsec",
        [RTE_ETH_EVENT_INTR_RMV] = "device removal",
        [RTE_ETH_EVENT_NEW] = "device probed",
        [RTE_ETH_EVENT_DESTROY] = "device released",
        [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
        [RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
        [RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
                            (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
                            (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
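
/*
 * Sketch of how the mask is consumed (added note; the actual check lives
 * in the event callback code): an event of type "type" is printed only
 * when its bit is set, i.e. when
 * (event_print_mask & (UINT32_C(1) << type)) is non-zero.
 */
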
/*
 * Decide whether all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
        .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_eth_fdir_conf fdir_conf = {
        .mode = RTE_FDIR_MODE_NONE,
        .pballoc = RTE_ETH_FDIR_PBALLOC_64K,
        .status = RTE_FDIR_REPORT_STATUS,
        .mask = {
                .vlan_tci_mask = 0xFFEF,
                .ipv4_mask     = {
                        .src_ip = 0xFFFFFFFF,
                        .dst_ip = 0xFFFFFFFF,
                },
                .ipv6_mask     = {
                        .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
                        .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
                },
                .src_port_mask = 0xFFFF,
                .dst_port_mask = 0xFFFF,
                .mac_addr_byte_mask = 0xFF,
                .tunnel_type_mask = 1,
                .tunnel_id_mask = 0xFFFFFFFF,
        },
        .drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Zero values are displayed for xstats by default; set to hide them.
 */
uint8_t xstats_hide_zero;

/*
 * Measurement of CPU cycles is disabled by default.
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts is disabled by default.
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group; 0 disables sharing.
 */
uint32_t rxq_share;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
#endif

/*
 * Hexadecimal bitmask of the RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;
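
/*
 * Illustrative note (added): each process polls a contiguous slice of the
 * queues, so with num_procs = 2 and 8 Rx queues per port, proc_id 0 is
 * expected to poll queues 0-3 and proc_id 1 queues 4-7.
 */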

static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
        uint64_t rx_meta_features = 0;
        int ret;

        if (!is_proc_primary())
                return;

        rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
        rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
        rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

        ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
        if (ret == 0) {
                if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
                        TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
                                    port_id);
                }

                if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
                        TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
                                    port_id);
                }

                if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
                        TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
                                    port_id);
                }
        } else if (ret != -ENOTSUP) {
                rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
                         port_id, rte_strerror(-ret));
        }
}

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        if (is_proc_primary())
                return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
                                        dev_conf);
        return 0;
}

static int
change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
{
#ifdef RTE_NET_BOND

        portid_t slave_pids[RTE_MAX_ETHPORTS];
        struct rte_port *port;
        int num_slaves;
        portid_t slave_pid;
        int i;

        num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
                                                RTE_MAX_ETHPORTS);
        if (num_slaves < 0) {
                fprintf(stderr, "Failed to get slave list for port = %u\n",
                        bond_pid);
                return num_slaves;
        }

        for (i = 0; i < num_slaves; i++) {
                slave_pid = slave_pids[i];
                port = &ports[slave_pid];
                port->port_status =
                        is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
        }
#else
        RTE_SET_USED(bond_pid);
        RTE_SET_USED(is_stop);
#endif
        return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
        int ret;

        if (is_proc_primary()) {
                ret = rte_eth_dev_start(port_id);
                if (ret != 0)
                        return ret;

                struct rte_port *port = &ports[port_id];

                /*
                 * Starting a bonded port also starts all slaves under the bonded
                 * device. So if this port is a bonded device, we need to modify the
                 * port status of these slaves.
                 */
                if (port->bond_flag == 1)
                        return change_bonding_slave_port_status(port_id, false);
        }

        return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
        int ret;

        if (is_proc_primary()) {
                ret = rte_eth_dev_stop(port_id);
                if (ret != 0)
                        return ret;

                struct rte_port *port = &ports[port_id];

                /*
                 * Stopping a bonded port also stops all slaves under the bonded
                 * device. So if this port is a bonded device, we need to modify the
                 * port status of these slaves.
                 */
                if (port->bond_flag == 1)
                        return change_bonding_slave_port_status(port_id, true);
        }

        return 0;
}

static void
mempool_free_mp(struct rte_mempool *mp)
{
        if (is_proc_primary())
                rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
        if (is_proc_primary())
                return rte_eth_dev_set_mtu(port_id, mtu);

        return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
                              enum rte_eth_event_type type,
                              void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
                                enum rte_dev_event_type type,
                                void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if the socket has already been discovered.
 * Return zero if it has, a positive value if the socket id is new.
 */
int
new_socket_id(unsigned int socket_id)
{
        unsigned int i;

        for (i = 0; i < num_sockets; i++) {
                if (socket_ids[i] == socket_id)
                        return 0;
        }
        return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
        unsigned int i;
        unsigned int nb_lc;
        unsigned int sock_num;

        nb_lc = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (!rte_lcore_is_enabled(i))
                        continue;
                sock_num = rte_lcore_to_socket_id(i);
                if (new_socket_id(sock_num)) {
                        if (num_sockets >= RTE_MAX_NUMA_NODES) {
                                rte_exit(EXIT_FAILURE,
                                         "Total sockets greater than %u\n",
                                         RTE_MAX_NUMA_NODES);
                        }
                        socket_ids[num_sockets++] = sock_num;
                }
                if (i == rte_get_main_lcore())
                        continue;
                fwd_lcores_cpuids[nb_lc++] = i;
        }
        nb_lcores = (lcoreid_t) nb_lc;
        nb_cfg_lcores = nb_lcores;
        nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
        portid_t i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
                peer_eth_addrs[i].addr_bytes[5] = i;
        }
}

static void
set_default_fwd_ports_config(void)
{
        portid_t pt_id;
        int i = 0;

        RTE_ETH_FOREACH_DEV(pt_id) {
                fwd_ports_ids[i++] = pt_id;

                /* Update sockets info according to the attached device */
                int socket_id = rte_eth_dev_socket_id(pt_id);
                if (socket_id >= 0 && new_socket_id(socket_id)) {
                        if (num_sockets >= RTE_MAX_NUMA_NODES) {
                                rte_exit(EXIT_FAILURE,
                                         "Total sockets greater than %u\n",
                                         RTE_MAX_NUMA_NODES);
                        }
                        socket_ids[num_sockets++] = socket_id;
                }
        }

        nb_cfg_ports = nb_ports;
        nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
        set_default_fwd_lcores_config();
        set_def_peer_eth_addrs();
        set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
        unsigned int n_pages, mbuf_per_pg, leftover;
        uint64_t total_mem, mbuf_mem, obj_sz;

        /* there is no good way to predict how much space the mempool will
         * occupy because it will allocate chunks on the fly, and some of those
         * will come from default DPDK memory while some will come from our
         * external memory, so just assume 128MB will be enough for everyone.
         */
        uint64_t hdr_mem = 128 << 20;

        /* account for possible non-contiguousness */
        obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
        if (obj_sz > pgsz) {
                TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
                return -1;
        }

        mbuf_per_pg = pgsz / obj_sz;
        leftover = (nb_mbufs % mbuf_per_pg) > 0;
        n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

        mbuf_mem = n_pages * pgsz;

        total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

        if (total_mem > SIZE_MAX) {
                TESTPMD_LOG(ERR, "Memory size too big\n");
                return -1;
        }
        *out = (size_t)total_mem;

        return 0;
}
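
/*
 * Worked example with hypothetical numbers (added note): nb_mbufs = 262144,
 * obj_sz = 2176 and pgsz = 2M give mbuf_per_pg = 963 and n_pages = 273, so
 * mbuf_mem is about 546M and the result is that plus the fixed 128M header
 * allowance, rounded up to the page size.
 */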

static int
pagesz_flags(uint64_t page_sz)
{
        /* as per mmap() manpage, all page sizes are log2 of page size
         * shifted by MAP_HUGE_SHIFT
         */
        int log2 = rte_log2_u64(page_sz);

        return (log2 << HUGE_SHIFT);
}
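
/*
 * For example (added note), a 2M page gives rte_log2_u64(RTE_PGSIZE_2M) = 21,
 * so the returned flags equal (21 << 26), the value Linux defines as
 * MAP_HUGE_2MB.
 */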

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
        void *addr;
        int flags;

        /* allocate anonymous hugepages */
        flags = MAP_ANONYMOUS | MAP_PRIVATE;
        if (huge)
                flags |= HUGE_FLAG | pagesz_flags(pgsz);

        addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
        if (addr == MAP_FAILED)
                return NULL;

        return addr;
}

struct extmem_param {
        void *addr;
        size_t len;
        size_t pgsz;
        rte_iova_t *iova_table;
        unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
                bool huge)
{
        uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
                        RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
        unsigned int cur_page, n_pages, pgsz_idx;
        size_t mem_sz, cur_pgsz;
        rte_iova_t *iovas = NULL;
        void *addr;
        int ret;

        for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
                /* skip anything that is too big */
                if (pgsizes[pgsz_idx] > SIZE_MAX)
                        continue;

                cur_pgsz = pgsizes[pgsz_idx];

                /* if we were told not to allocate hugepages, override */
                if (!huge)
                        cur_pgsz = sysconf(_SC_PAGESIZE);

                ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
                if (ret < 0) {
                        TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
                        return -1;
                }

                /* allocate our memory */
                addr = alloc_mem(mem_sz, cur_pgsz, huge);

                /* if we couldn't allocate memory with a specified page size,
                 * that doesn't mean we can't do it with other page sizes, so
                 * try another one.
                 */
                if (addr == NULL)
                        continue;

                /* store IOVA addresses for every page in this memory area */
                n_pages = mem_sz / cur_pgsz;

                iovas = malloc(sizeof(*iovas) * n_pages);

                if (iovas == NULL) {
                        TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
                        goto fail;
                }
                /* lock memory if it's not huge pages */
                if (!huge)
                        mlock(addr, mem_sz);

                /* populate IOVA addresses */
                for (cur_page = 0; cur_page < n_pages; cur_page++) {
                        rte_iova_t iova;
                        size_t offset;
                        void *cur;

                        offset = cur_pgsz * cur_page;
                        cur = RTE_PTR_ADD(addr, offset);

                        /* touch the page before getting its IOVA */
                        *(volatile char *)cur = 0;

                        iova = rte_mem_virt2iova(cur);

                        iovas[cur_page] = iova;
                }

                break;
        }
        /* if we couldn't allocate anything */
        if (iovas == NULL)
                return -1;

        param->addr = addr;
        param->len = mem_sz;
        param->pgsz = cur_pgsz;
        param->iova_table = iovas;
        param->iova_table_len = n_pages;

        return 0;
fail:
        free(iovas);
        if (addr)
                munmap(addr, mem_sz);

        return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
        struct extmem_param param;
        int socket_id, ret;

        memset(&param, 0, sizeof(param));

        /* check if our heap exists */
        socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
        if (socket_id < 0) {
                /* create our heap */
                ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
                if (ret < 0) {
                        TESTPMD_LOG(ERR, "Cannot create heap\n");
                        return -1;
                }
        }

        ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
        if (ret < 0) {
                TESTPMD_LOG(ERR, "Cannot create memory area\n");
                return -1;
        }

        /* we now have a valid memory area, so add it to heap */
        ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
                        param.addr, param.len, param.iova_table,
                        param.iova_table_len, param.pgsz);

        /* when using VFIO, memory is automatically mapped for DMA by EAL */

        /* not needed any more */
        free(param.iova_table);

        if (ret < 0) {
                TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
                munmap(param.addr, param.len);
                return -1;
        }

        /* success */

        TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
                        param.len >> 20);

        return 0;
}
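
/*
 * Note (added): the heap created above is consumed later in
 * mbuf_pool_create() (MP_ALLOC_XMEM/MP_ALLOC_XMEM_HUGE), which creates
 * the pool on the socket id returned by
 * rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME).
 */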

static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
             struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
        uint16_t pid = 0;
        int ret;

        RTE_ETH_FOREACH_DEV(pid) {
                struct rte_eth_dev_info dev_info;

                ret = eth_dev_info_get_print_err(pid, &dev_info);
                if (ret != 0) {
                        TESTPMD_LOG(DEBUG,
                                    "unable to get device info for port %d on addr 0x%p, "
                                    "mempool unmapping will not be performed\n",
                                    pid, memhdr->addr);
                        continue;
                }

                ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
                if (ret) {
                        TESTPMD_LOG(DEBUG,
                                    "unable to DMA unmap addr 0x%p "
                                    "for device %s\n",
                                    memhdr->addr, dev_info.device->name);
                }
        }
        ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
        if (ret) {
                TESTPMD_LOG(DEBUG,
                            "unable to un-register addr 0x%p\n", memhdr->addr);
        }
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
           struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
        uint16_t pid = 0;
        size_t page_size = sysconf(_SC_PAGESIZE);
        int ret;

        ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
                                  page_size);
        if (ret) {
                TESTPMD_LOG(DEBUG,
                            "unable to register addr 0x%p\n", memhdr->addr);
                return;
        }
        RTE_ETH_FOREACH_DEV(pid) {
                struct rte_eth_dev_info dev_info;

                ret = eth_dev_info_get_print_err(pid, &dev_info);
                if (ret != 0) {
                        TESTPMD_LOG(DEBUG,
                                    "unable to get device info for port %d on addr 0x%p, "
                                    "mempool mapping will not be performed\n",
                                    pid, memhdr->addr);
                        continue;
                }
                ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
                if (ret) {
                        TESTPMD_LOG(DEBUG,
                                    "unable to DMA map addr 0x%p "
                                    "for device %s\n",
                                    memhdr->addr, dev_info.device->name);
                }
        }
}
#endif

static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
            char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
        struct rte_pktmbuf_extmem *xmem;
        unsigned int ext_num, zone_num, elt_num;
        uint16_t elt_size;

        elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
        elt_num = EXTBUF_ZONE_SIZE / elt_size;
        zone_num = (nb_mbufs + elt_num - 1) / elt_num;
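
        /*
         * Example with hypothetical counts (added note): for the common
         * 2176-byte mbuf segment, elt_size stays 2176 (already cache-line
         * aligned), elt_num = 2096896 / 2176 = 963 buffers per zone, and
         * 262144 mbufs need zone_num = (262144 + 962) / 963 = 273 memzones.
         */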

        xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
        if (xmem == NULL) {
                TESTPMD_LOG(ERR, "Cannot allocate memory for "
                                 "external buffer descriptors\n");
                *ext_mem = NULL;
                return 0;
        }
        for (ext_num = 0; ext_num < zone_num; ext_num++) {
                struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
                const struct rte_memzone *mz;
                char mz_name[RTE_MEMZONE_NAMESIZE];
                int ret;

                ret = snprintf(mz_name, sizeof(mz_name),
                        RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
                if (ret < 0 || ret >= (int)sizeof(mz_name)) {
                        errno = ENAMETOOLONG;
                        ext_num = 0;
                        break;
                }
                mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
                                         socket_id,
                                         RTE_MEMZONE_IOVA_CONTIG |
                                         RTE_MEMZONE_1GB |
                                         RTE_MEMZONE_SIZE_HINT_ONLY);
                if (mz == NULL) {
                        /*
                         * The caller exits on external buffer creation
                         * error, so there is no need to free memzones.
                         */
                        errno = ENOMEM;
                        ext_num = 0;
                        break;
                }
                xseg->buf_ptr = mz->addr;
                xseg->buf_iova = mz->iova;
                xseg->buf_len = EXTBUF_ZONE_SIZE;
                xseg->elt_size = elt_size;
        }
        if (ext_num == 0 && xmem != NULL) {
                free(xmem);
                xmem = NULL;
        }
        *ext_mem = xmem;
        return ext_num;
}

/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
                 unsigned int socket_id, uint16_t size_idx)
{
        char pool_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
        uint32_t mb_size;

        mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
        mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
        if (!is_proc_primary()) {
                rte_mp = rte_mempool_lookup(pool_name);
                if (rte_mp == NULL)
                        rte_exit(EXIT_FAILURE,
                                "Get mbuf pool for socket %u failed: %s\n",
                                socket_id, rte_strerror(rte_errno));
                return rte_mp;
        }

        TESTPMD_LOG(INFO,
                "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
                pool_name, nb_mbuf, mbuf_seg_size, socket_id);

        switch (mp_alloc_type) {
        case MP_ALLOC_NATIVE:
                {
                        /* wrapper to rte_mempool_create() */
                        TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
                                        rte_mbuf_best_mempool_ops());
                        rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
                                mb_mempool_cache, 0, mbuf_seg_size, socket_id);
                        break;
                }
#ifndef RTE_EXEC_ENV_WINDOWS
        case MP_ALLOC_ANON:
                {
                        rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
                                mb_size, (unsigned int) mb_mempool_cache,
                                sizeof(struct rte_pktmbuf_pool_private),
                                socket_id, mempool_flags);
                        if (rte_mp == NULL)
                                goto err;

                        if (rte_mempool_populate_anon(rte_mp) == 0) {
                                rte_mempool_free(rte_mp);
                                rte_mp = NULL;
                                goto err;
                        }
                        rte_pktmbuf_pool_init(rte_mp, NULL);
                        rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
                        rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
                        break;
                }
        case MP_ALLOC_XMEM:
        case MP_ALLOC_XMEM_HUGE:
                {
                        int heap_socket;
                        bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

                        if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
                                rte_exit(EXIT_FAILURE, "Could not create external memory\n");

                        heap_socket =
                                rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
                        if (heap_socket < 0)
                                rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

                        TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
                                        rte_mbuf_best_mempool_ops());
                        rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
                                        mb_mempool_cache, 0, mbuf_seg_size,
                                        heap_socket);
                        break;
                }
#endif
        case MP_ALLOC_XBUF:
                {
                        struct rte_pktmbuf_extmem *ext_mem;
                        unsigned int ext_num;

                        ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
                                               socket_id, pool_name, &ext_mem);
                        if (ext_num == 0)
                                rte_exit(EXIT_FAILURE,
                                         "Can't create pinned data buffers\n");

                        TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
                                        rte_mbuf_best_mempool_ops());
                        rte_mp = rte_pktmbuf_pool_create_extbuf
                                        (pool_name, nb_mbuf, mb_mempool_cache,
                                         0, mbuf_seg_size, socket_id,
                                         ext_mem, ext_num);
                        free(ext_mem);
                        break;
                }
        default:
                {
                        rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
                }
        }

#ifndef RTE_EXEC_ENV_WINDOWS
err:
#endif
        if (rte_mp == NULL) {
                rte_exit(EXIT_FAILURE,
                        "Creation of mbuf pool for socket %u failed: %s\n",
                        socket_id, rte_strerror(rte_errno));
        } else if (verbose_level > 0) {
                rte_mempool_dump(stdout, rte_mp);
        }
        return rte_mp;
}

/*
 * Check whether the given socket id is valid in NUMA mode;
 * return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
        static int warning_once = 0;

        if (new_socket_id(socket_id)) {
                if (!warning_once && numa_support)
                        fprintf(stderr,
                                "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
                warning_once = 1;
                return -1;
        }
        return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
        queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
        bool max_rxq_valid = false;
        portid_t pi;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_FOREACH_DEV(pi) {
                if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
                        continue;

                max_rxq_valid = true;
                if (dev_info.max_rx_queues < allowed_max_rxq) {
                        allowed_max_rxq = dev_info.max_rx_queues;
                        *pid = pi;
                }
        }
        return max_rxq_valid ? allowed_max_rxq : 0;
}

/*
 * Check whether the given rxq is valid.
 * It is valid if it does not exceed the maximum number of RX queues
 * of any port; return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
        queueid_t allowed_max_rxq;
        portid_t pid = 0;

        allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
        if (rxq > allowed_max_rxq) {
                fprintf(stderr,
                        "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
                        rxq, allowed_max_rxq, pid);
                return -1;
        }
        return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
        queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
        bool max_txq_valid = false;
        portid_t pi;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_FOREACH_DEV(pi) {
                if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
                        continue;

                max_txq_valid = true;
                if (dev_info.max_tx_queues < allowed_max_txq) {
                        allowed_max_txq = dev_info.max_tx_queues;
                        *pid = pi;
                }
        }
        return max_txq_valid ? allowed_max_txq : 0;
}

/*
 * Check whether the given txq is valid.
 * It is valid if it does not exceed the maximum number of TX queues
 * of any port; return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
        queueid_t allowed_max_txq;
        portid_t pid = 0;

        allowed_max_txq = get_allowed_max_nb_txq(&pid);
        if (txq > allowed_max_txq) {
                fprintf(stderr,
                        "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
                        txq, allowed_max_txq, pid);
                return -1;
        }
        return 0;
}

/*
 * Get the allowed maximum number of RXDs for every RX queue.
 * *pid returns the port id which has the minimal value of
 * max_rxd over all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
        uint16_t allowed_max_rxd = UINT16_MAX;
        portid_t pi;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_FOREACH_DEV(pi) {
                if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
                        continue;

                if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
                        allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
                        *pid = pi;
                }
        }
        return allowed_max_rxd;
}

/*
 * Get the allowed minimal number of RXDs for every RX queue.
 * *pid returns the port id which has the largest value of
 * min_rxd over all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
        uint16_t allowed_min_rxd = 0;
        portid_t pi;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_FOREACH_DEV(pi) {
                if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
                        continue;

                if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
                        allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
                        *pid = pi;
                }
        }

        return allowed_min_rxd;
}

/*
 * Check whether the given rxd is valid.
 * It is valid if it does not exceed the maximum number of RXDs of any
 * RX queue and is not less than the minimal number of RXDs of any
 * RX queue; return 0 if valid, -1 otherwise.
 */
int
check_nb_rxd(queueid_t rxd)
{
        uint16_t allowed_max_rxd;
        uint16_t allowed_min_rxd;
        portid_t pid = 0;

        allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
        if (rxd > allowed_max_rxd) {
                fprintf(stderr,
                        "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
                        rxd, allowed_max_rxd, pid);
                return -1;
        }

        allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
        if (rxd < allowed_min_rxd) {
                fprintf(stderr,
                        "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
                        rxd, allowed_min_rxd, pid);
                return -1;
        }

        return 0;
}

/*
 * Get the allowed maximum number of TXDs for every TX queue.
 * *pid returns the port id which has the minimal value of
 * max_txd over all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_txd(portid_t *pid)
{
        uint16_t allowed_max_txd = UINT16_MAX;
        portid_t pi;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_FOREACH_DEV(pi) {
                if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
                        continue;

                if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
                        allowed_max_txd = dev_info.tx_desc_lim.nb_max;
                        *pid = pi;
                }
        }
        return allowed_max_txd;
}

/*
 * Get the allowed minimal number of TXDs for every TX queue.
 * *pid returns the port id which has the largest value of
 * min_txd over all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
        uint16_t allowed_min_txd = 0;
        portid_t pi;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_FOREACH_DEV(pi) {
                if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
                        continue;

                if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
                        allowed_min_txd = dev_info.tx_desc_lim.nb_min;
                        *pid = pi;
                }
        }

        return allowed_min_txd;
}

/*
 * Check whether the given txd is valid.
 * It is valid if it does not exceed the maximum number of TXDs of any
 * TX queue and is not less than the minimal number of TXDs of any
 * TX queue; return 0 if valid, -1 otherwise.
 */
int
check_nb_txd(queueid_t txd)
{
        uint16_t allowed_max_txd;
        uint16_t allowed_min_txd;
        portid_t pid = 0;

        allowed_max_txd = get_allowed_max_nb_txd(&pid);
        if (txd > allowed_max_txd) {
                fprintf(stderr,
                        "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
                        txd, allowed_max_txd, pid);
                return -1;
        }

        allowed_min_txd = get_allowed_min_nb_txd(&pid);
        if (txd < allowed_min_txd) {
                fprintf(stderr,
                        "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
                        txd, allowed_min_txd, pid);
                return -1;
        }
        return 0;
}


/*
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the port id which has the minimal value of
 * max_hairpin_queues among all ports.
 */
queueid_t
get_allowed_max_nb_hairpinq(portid_t *pid)
{
        queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
        portid_t pi;
        struct rte_eth_hairpin_cap cap;

        RTE_ETH_FOREACH_DEV(pi) {
                if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
                        *pid = pi;
                        return 0;
                }
                if (cap.max_nb_queues < allowed_max_hairpinq) {
                        allowed_max_hairpinq = cap.max_nb_queues;
                        *pid = pi;
                }
        }
        return allowed_max_hairpinq;
}

/*
 * Check whether the given hairpinq is valid.
 * It is valid if it does not exceed the maximum number of hairpin
 * queues of any port; return 0 if valid, -1 otherwise.
 */
int
check_nb_hairpinq(queueid_t hairpinq)
{
        queueid_t allowed_max_hairpinq;
        portid_t pid = 0;

        allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
        if (hairpinq > allowed_max_hairpinq) {
                fprintf(stderr,
                        "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
                        hairpinq, allowed_max_hairpinq, pid);
                return -1;
        }
        return 0;
}

static int
get_eth_overhead(struct rte_eth_dev_info *dev_info)
{
        uint32_t eth_overhead;

        if (dev_info->max_mtu != UINT16_MAX &&
            dev_info->max_rx_pktlen > dev_info->max_mtu)
                eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
        else
                eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        return eth_overhead;
}
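
/*
 * Example (added note): a device reporting max_rx_pktlen = 1518 and
 * max_mtu = 1500 yields an overhead of 18 bytes, the same as the
 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) fallback.
 */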
1623
1624static void
1625init_config_port_offloads(portid_t pid, uint32_t socket_id)
1626{
1627        struct rte_port *port = &ports[pid];
1628        int ret;
1629        int i;
1630
1631        eth_rx_metadata_negotiate_mp(pid);
1632
1633        port->dev_conf.txmode = tx_mode;
1634        port->dev_conf.rxmode = rx_mode;
1635
1636        ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1637        if (ret != 0)
1638                rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1639
1640        if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1641                port->dev_conf.txmode.offloads &=
1642                        ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1643
1644        /* Apply Rx offloads configuration */
1645        for (i = 0; i < port->dev_info.max_rx_queues; i++)
1646                port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
1647        /* Apply Tx offloads configuration */
1648        for (i = 0; i < port->dev_info.max_tx_queues; i++)
1649                port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
1650
1651        if (eth_link_speed)
1652                port->dev_conf.link_speeds = eth_link_speed;
1653
1654        if (max_rx_pkt_len)
1655                port->dev_conf.rxmode.mtu = max_rx_pkt_len -
1656                        get_eth_overhead(&port->dev_info);
1657
1658        /* set flags to reconfigure the port and its queues */
1659        port->need_reconfig = 1;
1660        port->need_reconfig_queues = 1;
1661        port->socket_id = socket_id;
1662        port->tx_metadata = 0;
1663
1664        /*
1665         * Check the maximum number of segments per MTU
1666         * and update the mbuf data size accordingly.
1667         */
1668        if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1669            port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1670                uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
1671                uint16_t mtu;
1672
1673                if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
1674                        uint16_t data_size = (mtu + eth_overhead) /
1675                                port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1676                        uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
1677
1678                        if (buffer_size > mbuf_data_size[0]) {
1679                                mbuf_data_size[0] = buffer_size;
1680                                TESTPMD_LOG(WARNING,
1681                                        "Configured mbuf size of the first segment %hu\n",
1682                                        mbuf_data_size[0]);
1683                        }
1684                }
1685        }
1686}
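
/*
 * Editorial example of the sizing math above, with illustrative values:
 * nb_mtu_seg_max = 5, an MTU of 9000 and 18 bytes of overhead give
 * data_size = 9018 / 5 = 1803 (integer division) and buffer_size =
 * 1803 + RTE_PKTMBUF_HEADROOM (128 by default) = 1931, so
 * mbuf_data_size[0] is raised to 1931 if it was smaller.
 */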
1687
1688static void
1689init_config(void)
1690{
1691        portid_t pid;
1692        struct rte_mempool *mbp;
1693        unsigned int nb_mbuf_per_pool;
1694        lcoreid_t  lc_id;
1695#ifdef RTE_LIB_GRO
1696        struct rte_gro_param gro_param;
1697#endif
1698#ifdef RTE_LIB_GSO
1699        uint32_t gso_types;
1700#endif
1701
1702        /* Configuration of logical cores. */
1703        fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1704                                sizeof(struct fwd_lcore *) * nb_lcores,
1705                                RTE_CACHE_LINE_SIZE);
1706        if (fwd_lcores == NULL) {
1707                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1708                                                        "failed\n", nb_lcores);
1709        }
1710        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1711                fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1712                                               sizeof(struct fwd_lcore),
1713                                               RTE_CACHE_LINE_SIZE);
1714                if (fwd_lcores[lc_id] == NULL) {
1715                        rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1716                                                                "failed\n");
1717                }
1718                fwd_lcores[lc_id]->cpuid_idx = lc_id;
1719        }
1720
1721        RTE_ETH_FOREACH_DEV(pid) {
1722                uint32_t socket_id;
1723
1724                if (numa_support) {
1725                        socket_id = port_numa[pid];
1726                        if (port_numa[pid] == NUMA_NO_CONFIG) {
1727                                socket_id = rte_eth_dev_socket_id(pid);
1728
1729                                /*
1730                                 * if socket_id is invalid,
1731                                 * set to the first available socket.
1732                                 */
1733                                if (check_socket_id(socket_id) < 0)
1734                                        socket_id = socket_ids[0];
1735                        }
1736                } else {
1737                        socket_id = (socket_num == UMA_NO_CONFIG) ?
1738                                    0 : socket_num;
1739                }
1740                /* Apply default TxRx configuration for all ports */
1741                init_config_port_offloads(pid, socket_id);
1742        }
1743        /*
1744         * Create mbuf pools.
1745         * If NUMA support is disabled, create a single mbuf pool in
1746         * socket 0 memory by default.
1747         * Otherwise, create an mbuf pool in the memory of each of sockets 0 and 1.
1748         *
1749         * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
1750         * and nb_txd can be reconfigured at run time.
1751         */
1752        if (param_total_num_mbufs)
1753                nb_mbuf_per_pool = param_total_num_mbufs;
1754        else {
1755                nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1756                        (nb_lcores * mb_mempool_cache) +
1757                        RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1758                nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1759        }
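
/*
 * Editorial example of the default pool sizing above, with illustrative
 * values: RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, 8 lcores,
 * mb_mempool_cache = 250 and MAX_PKT_BURST = 512 give
 * 2048 + 8 * 250 + 2048 + 512 = 6608 mbufs, multiplied by a
 * RTE_MAX_ETHPORTS of 32 for 211456 mbufs per pool.
 */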
1760
1761        if (numa_support) {
1762                uint8_t i, j;
1763
1764                for (i = 0; i < num_sockets; i++)
1765                        for (j = 0; j < mbuf_data_size_n; j++)
1766                                mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1767                                        mbuf_pool_create(mbuf_data_size[j],
1768                                                          nb_mbuf_per_pool,
1769                                                          socket_ids[i], j);
1770        } else {
1771                uint8_t i;
1772
1773                for (i = 0; i < mbuf_data_size_n; i++)
1774                        mempools[i] = mbuf_pool_create
1775                                        (mbuf_data_size[i],
1776                                         nb_mbuf_per_pool,
1777                                         socket_num == UMA_NO_CONFIG ?
1778                                         0 : socket_num, i);
1779        }
1780
1781        init_port_config();
1782
1783#ifdef RTE_LIB_GSO
1784        gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1785                RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
1786#endif
1787        /*
1788         * Record which mbuf pool each logical core should use, if needed.
1789         */
1790        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1791                mbp = mbuf_pool_find(
1792                        rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1793
1794                if (mbp == NULL)
1795                        mbp = mbuf_pool_find(0, 0);
1796                fwd_lcores[lc_id]->mbp = mbp;
1797#ifdef RTE_LIB_GSO
1798                /* initialize GSO context */
1799                fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1800                fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1801                fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1802                fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1803                        RTE_ETHER_CRC_LEN;
1804                fwd_lcores[lc_id]->gso_ctx.flag = 0;
1805#endif
1806        }
1807
1808        fwd_config_setup();
1809
1810#ifdef RTE_LIB_GRO
1811        /* create a gro context for each lcore */
1812        gro_param.gro_types = RTE_GRO_TCP_IPV4;
1813        gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1814        gro_param.max_item_per_flow = MAX_PKT_BURST;
1815        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1816                gro_param.socket_id = rte_lcore_to_socket_id(
1817                                fwd_lcores_cpuids[lc_id]);
1818                fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1819                if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1820                        rte_exit(EXIT_FAILURE,
1821                                        "rte_gro_ctx_create() failed\n");
1822                }
1823        }
1824#endif
1825}
1826
1827
1828void
1829reconfig(portid_t new_port_id, unsigned socket_id)
1830{
1831        /* Reconfiguration of Ethernet ports. */
1832        init_config_port_offloads(new_port_id, socket_id);
1833        init_port_config();
1834}
1835
1836int
1837init_fwd_streams(void)
1838{
1839        portid_t pid;
1840        struct rte_port *port;
1841        streamid_t sm_id, nb_fwd_streams_new;
1842        queueid_t q;
1843
1844        /* set socket id according to numa or not */
1845        RTE_ETH_FOREACH_DEV(pid) {
1846                port = &ports[pid];
1847                if (nb_rxq > port->dev_info.max_rx_queues) {
1848                        fprintf(stderr,
1849                                "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
1850                                nb_rxq, port->dev_info.max_rx_queues);
1851                        return -1;
1852                }
1853                if (nb_txq > port->dev_info.max_tx_queues) {
1854                        fprintf(stderr,
1855                                "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
1856                                nb_txq, port->dev_info.max_tx_queues);
1857                        return -1;
1858                }
1859                if (numa_support) {
1860                        if (port_numa[pid] != NUMA_NO_CONFIG)
1861                                port->socket_id = port_numa[pid];
1862                        else {
1863                                port->socket_id = rte_eth_dev_socket_id(pid);
1864
1865                                /*
1866                                 * if socket_id is invalid,
1867                                 * set to the first available socket.
1868                                 */
1869                                if (check_socket_id(port->socket_id) < 0)
1870                                        port->socket_id = socket_ids[0];
1871                        }
1872                }
1873                else {
1874                        if (socket_num == UMA_NO_CONFIG)
1875                                port->socket_id = 0;
1876                        else
1877                                port->socket_id = socket_num;
1878                }
1879        }
1880
1881        q = RTE_MAX(nb_rxq, nb_txq);
1882        if (q == 0) {
1883                fprintf(stderr,
1884                        "Fail: Cannot allocate fwd streams as number of queues is 0\n");
1885                return -1;
1886        }
1887        nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1888        if (nb_fwd_streams_new == nb_fwd_streams)
1889                return 0;
1890        /* clear the old */
1891        if (fwd_streams != NULL) {
1892                for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1893                        if (fwd_streams[sm_id] == NULL)
1894                                continue;
1895                        rte_free(fwd_streams[sm_id]);
1896                        fwd_streams[sm_id] = NULL;
1897                }
1898                rte_free(fwd_streams);
1899                fwd_streams = NULL;
1900        }
1901
1902        /* init new */
1903        nb_fwd_streams = nb_fwd_streams_new;
1904        if (nb_fwd_streams) {
1905                fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1906                        sizeof(struct fwd_stream *) * nb_fwd_streams,
1907                        RTE_CACHE_LINE_SIZE);
1908                if (fwd_streams == NULL)
1909                        rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1910                                 " (struct fwd_stream *)) failed\n",
1911                                 nb_fwd_streams);
1912
1913                for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1914                        fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1915                                " struct fwd_stream", sizeof(struct fwd_stream),
1916                                RTE_CACHE_LINE_SIZE);
1917                        if (fwd_streams[sm_id] == NULL)
1918                                rte_exit(EXIT_FAILURE, "rte_zmalloc"
1919                                         "(struct fwd_stream) failed\n");
1920                }
1921        }
1922
1923        return 0;
1924}
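
/*
 * Editorial example: with 2 forwarding ports, nb_rxq = 4 and nb_txq = 2,
 * q = max(4, 2) = 4 and nb_fwd_streams = 2 * 4 = 8 streams are allocated.
 */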
1925
1926static void
1927pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1928{
1929        uint64_t total_burst, sburst;
1930        uint64_t nb_burst;
1931        uint64_t burst_stats[4];
1932        uint16_t pktnb_stats[4];
1933        uint16_t nb_pkt;
1934        int burst_percent[4], sburstp;
1935        int i;
1936
1937        /*
1938         * First compute the total number of packet bursts and the
1939         * two highest numbers of bursts of the same number of packets.
1940         */
1941        memset(&burst_stats, 0x0, sizeof(burst_stats));
1942        memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1943
1944        /* Always show the stats for burst size 0 */
1945        total_burst = pbs->pkt_burst_spread[0];
1946        burst_stats[0] = pbs->pkt_burst_spread[0];
1947        pktnb_stats[0] = 0;
1948
1949        /* Find the next 2 burst sizes with highest occurrences. */
1950        for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1951                nb_burst = pbs->pkt_burst_spread[nb_pkt];
1952
1953                if (nb_burst == 0)
1954                        continue;
1955
1956                total_burst += nb_burst;
1957
1958                if (nb_burst > burst_stats[1]) {
1959                        burst_stats[2] = burst_stats[1];
1960                        pktnb_stats[2] = pktnb_stats[1];
1961                        burst_stats[1] = nb_burst;
1962                        pktnb_stats[1] = nb_pkt;
1963                } else if (nb_burst > burst_stats[2]) {
1964                        burst_stats[2] = nb_burst;
1965                        pktnb_stats[2] = nb_pkt;
1966                }
1967        }
1968        if (total_burst == 0)
1969                return;
1970
1971        printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1972        for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1973                if (i == 3) {
1974                        printf("%d%% of other]\n", 100 - sburstp);
1975                        return;
1976                }
1977
1978                sburst += burst_stats[i];
1979                if (sburst == total_burst) {
1980                        printf("%d%% of %d pkts]\n",
1981                                100 - sburstp, (int) pktnb_stats[i]);
1982                        return;
1983                }
1984
1985                burst_percent[i] =
1986                        (double)burst_stats[i] / total_burst * 100;
1987                printf("%d%% of %d pkts + ",
1988                        burst_percent[i], (int) pktnb_stats[i]);
1989                sburstp += burst_percent[i];
1990        }
1991}
1992
1993static void
1994fwd_stream_stats_display(streamid_t stream_id)
1995{
1996        struct fwd_stream *fs;
1997        static const char *fwd_top_stats_border = "-------";
1998
1999        fs = fwd_streams[stream_id];
2000        if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
2001            (fs->fwd_dropped == 0))
2002                return;
2003        printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
2004               "TX Port=%2d/Queue=%2d %s\n",
2005               fwd_top_stats_border, fs->rx_port, fs->rx_queue,
2006               fs->tx_port, fs->tx_queue, fwd_top_stats_border);
2007        printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
2008               " TX-dropped: %-14"PRIu64,
2009               fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
2010
2011        /* if checksum mode */
2012        if (cur_fwd_eng == &csum_fwd_engine) {
2013                printf("  RX- bad IP checksum: %-14"PRIu64
2014                       "  RX- bad L4 checksum: %-14"PRIu64
2015                       " RX- bad outer L4 checksum: %-14"PRIu64"\n",
2016                        fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
2017                        fs->rx_bad_outer_l4_csum);
2018                printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
2019                        fs->rx_bad_outer_ip_csum);
2020        } else {
2021                printf("\n");
2022        }
2023
2024        if (record_burst_stats) {
2025                pkt_burst_stats_display("RX", &fs->rx_burst_stats);
2026                pkt_burst_stats_display("TX", &fs->tx_burst_stats);
2027        }
2028}
2029
2030void
2031fwd_stats_display(void)
2032{
2033        static const char *fwd_stats_border = "----------------------";
2034        static const char *acc_stats_border = "+++++++++++++++";
2035        struct {
2036                struct fwd_stream *rx_stream;
2037                struct fwd_stream *tx_stream;
2038                uint64_t tx_dropped;
2039                uint64_t rx_bad_ip_csum;
2040                uint64_t rx_bad_l4_csum;
2041                uint64_t rx_bad_outer_l4_csum;
2042                uint64_t rx_bad_outer_ip_csum;
2043        } ports_stats[RTE_MAX_ETHPORTS];
2044        uint64_t total_rx_dropped = 0;
2045        uint64_t total_tx_dropped = 0;
2046        uint64_t total_rx_nombuf = 0;
2047        struct rte_eth_stats stats;
2048        uint64_t fwd_cycles = 0;
2049        uint64_t total_recv = 0;
2050        uint64_t total_xmit = 0;
2051        struct rte_port *port;
2052        streamid_t sm_id;
2053        portid_t pt_id;
2054        int ret;
2055        int i;
2056
2057        memset(ports_stats, 0, sizeof(ports_stats));
2058
2059        for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2060                struct fwd_stream *fs = fwd_streams[sm_id];
2061
2062                if (cur_fwd_config.nb_fwd_streams >
2063                    cur_fwd_config.nb_fwd_ports) {
2064                        fwd_stream_stats_display(sm_id);
2065                } else {
2066                        ports_stats[fs->tx_port].tx_stream = fs;
2067                        ports_stats[fs->rx_port].rx_stream = fs;
2068                }
2069
2070                ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
2071
2072                ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
2073                ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
2074                ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
2075                                fs->rx_bad_outer_l4_csum;
2076                ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2077                                fs->rx_bad_outer_ip_csum;
2078
2079                if (record_core_cycles)
2080                        fwd_cycles += fs->core_cycles;
2081        }
2082        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2083                pt_id = fwd_ports_ids[i];
2084                port = &ports[pt_id];
2085
2086                ret = rte_eth_stats_get(pt_id, &stats);
2087                if (ret != 0) {
2088                        fprintf(stderr,
2089                                "%s: Error: failed to get stats (port %u): %d",
2090                                __func__, pt_id, ret);
2091                        continue;
2092                }
2093                stats.ipackets -= port->stats.ipackets;
2094                stats.opackets -= port->stats.opackets;
2095                stats.ibytes -= port->stats.ibytes;
2096                stats.obytes -= port->stats.obytes;
2097                stats.imissed -= port->stats.imissed;
2098                stats.oerrors -= port->stats.oerrors;
2099                stats.rx_nombuf -= port->stats.rx_nombuf;
2100
2101                total_recv += stats.ipackets;
2102                total_xmit += stats.opackets;
2103                total_rx_dropped += stats.imissed;
2104                total_tx_dropped += ports_stats[pt_id].tx_dropped;
2105                total_tx_dropped += stats.oerrors;
2106                total_rx_nombuf  += stats.rx_nombuf;
2107
2108                printf("\n  %s Forward statistics for port %-2d %s\n",
2109                       fwd_stats_border, pt_id, fwd_stats_border);
2110
2111                printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
2112                       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
2113                       stats.ipackets + stats.imissed);
2114
2115                if (cur_fwd_eng == &csum_fwd_engine) {
2116                        printf("  Bad-ipcsum: %-14"PRIu64
2117                               " Bad-l4csum: %-14"PRIu64
2118                               "Bad-outer-l4csum: %-14"PRIu64"\n",
2119                               ports_stats[pt_id].rx_bad_ip_csum,
2120                               ports_stats[pt_id].rx_bad_l4_csum,
2121                               ports_stats[pt_id].rx_bad_outer_l4_csum);
2122                        printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
2123                               ports_stats[pt_id].rx_bad_outer_ip_csum);
2124                }
2125                if (stats.ierrors + stats.rx_nombuf > 0) {
2126                        printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
2127                        printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
2128                }
2129
2130                printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
2131                       "TX-total: %-"PRIu64"\n",
2132                       stats.opackets, ports_stats[pt_id].tx_dropped,
2133                       stats.opackets + ports_stats[pt_id].tx_dropped);
2134
2135                if (record_burst_stats) {
2136                        if (ports_stats[pt_id].rx_stream)
2137                                pkt_burst_stats_display("RX",
2138                                        &ports_stats[pt_id].rx_stream->rx_burst_stats);
2139                        if (ports_stats[pt_id].tx_stream)
2140                                pkt_burst_stats_display("TX",
2141                                &ports_stats[pt_id].tx_stream->tx_burst_stats);
2142                }
2143
2144                printf("  %s--------------------------------%s\n",
2145                       fwd_stats_border, fwd_stats_border);
2146        }
2147
2148        printf("\n  %s Accumulated forward statistics for all ports"
2149               "%s\n",
2150               acc_stats_border, acc_stats_border);
2151        printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
2152               "%-"PRIu64"\n"
2153               "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
2154               "%-"PRIu64"\n",
2155               total_recv, total_rx_dropped, total_recv + total_rx_dropped,
2156               total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
2157        if (total_rx_nombuf > 0)
2158                printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
2159        printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
2160               "%s\n",
2161               acc_stats_border, acc_stats_border);
2162        if (record_core_cycles) {
2163#define CYC_PER_MHZ 1E6
2164                if (total_recv > 0 || total_xmit > 0) {
2165                        uint64_t total_pkts = 0;
2166                        if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2167                            strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2168                                total_pkts = total_xmit;
2169                        else
2170                                total_pkts = total_recv;
2171
2172                        printf("\n  CPU cycles/packet=%.2F (total cycles="
2173                               "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2174                               " MHz Clock\n",
2175                               (double) fwd_cycles / total_pkts,
2176                               fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2177                               (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
2178                }
2179        }
2180}
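
/*
 * Editorial example of the cycles-per-packet report above, with
 * illustrative values: fwd_cycles = 5200000000, total_pkts = 100000000
 * and a 2.6 GHz TSC print as 52.00 CPU cycles/packet at 2600 MHz Clock.
 */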
2181
2182void
2183fwd_stats_reset(void)
2184{
2185        streamid_t sm_id;
2186        portid_t pt_id;
2187        int ret;
2188        int i;
2189
2190        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2191                pt_id = fwd_ports_ids[i];
2192                ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2193                if (ret != 0)
2194                        fprintf(stderr,
2195                                "%s: Error: failed to clear stats (port %u):%d",
2196                                __func__, pt_id, ret);
2197        }
2198        for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2199                struct fwd_stream *fs = fwd_streams[sm_id];
2200
2201                fs->rx_packets = 0;
2202                fs->tx_packets = 0;
2203                fs->fwd_dropped = 0;
2204                fs->rx_bad_ip_csum = 0;
2205                fs->rx_bad_l4_csum = 0;
2206                fs->rx_bad_outer_l4_csum = 0;
2207                fs->rx_bad_outer_ip_csum = 0;
2208
2209                memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2210                memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2211                fs->core_cycles = 0;
2212        }
2213}
2214
2215static void
2216flush_fwd_rx_queues(void)
2217{
2218        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2219        portid_t  rxp;
2220        portid_t port_id;
2221        queueid_t rxq;
2222        uint16_t  nb_rx;
2223        uint16_t  i;
2224        uint8_t   j;
2225        uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2226        uint64_t timer_period;
2227
2228        if (num_procs > 1) {
2229                printf("multi-process does not support flushing fwd Rx queues, skipping\n");
2230                return;
2231        }
2232
2233        /* convert to number of cycles */
2234        timer_period = rte_get_timer_hz(); /* 1 second timeout */
2235
2236        for (j = 0; j < 2; j++) {
2237                for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2238                        for (rxq = 0; rxq < nb_rxq; rxq++) {
2239                                port_id = fwd_ports_ids[rxp];
2240
2241                                /* Polling stopped queues is prohibited. */
2242                                if (ports[port_id].rxq[rxq].state ==
2243                                    RTE_ETH_QUEUE_STATE_STOPPED)
2244                                        continue;
2245
2246                                /*
2247                                 * testpmd can get stuck in the do-while loop
2248                                 * below if rte_eth_rx_burst() always returns
2249                                 * nonzero packets, so a timer is used to
2250                                 * exit the loop after 1 second expires.
2251                                 */
2252                                prev_tsc = rte_rdtsc();
2253                                do {
2254                                        nb_rx = rte_eth_rx_burst(port_id, rxq,
2255                                                pkts_burst, MAX_PKT_BURST);
2256                                        for (i = 0; i < nb_rx; i++)
2257                                                rte_pktmbuf_free(pkts_burst[i]);
2258
2259                                        cur_tsc = rte_rdtsc();
2260                                        diff_tsc = cur_tsc - prev_tsc;
2261                                        timer_tsc += diff_tsc;
2262                                } while ((nb_rx > 0) &&
2263                                        (timer_tsc < timer_period));
2264                                timer_tsc = 0;
2265                        }
2266                }
2267                rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2268        }
2269}
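
/*
 * Editorial sketch, not part of testpmd: the same TSC-based timeout
 * pattern in isolation, draining a single queue for at most timeout_sec
 * seconds. Assumes the port is started and the queue may be polled; the
 * helper name example_drain_rxq() is hypothetical.
 */
static void
example_drain_rxq(uint16_t port_id, uint16_t queue_id,
                  unsigned int timeout_sec)
{
        struct rte_mbuf *burst[MAX_PKT_BURST];
        uint64_t deadline = rte_rdtsc() + timeout_sec * rte_get_timer_hz();
        uint16_t nb_rx, i;

        do {
                nb_rx = rte_eth_rx_burst(port_id, queue_id, burst,
                                         MAX_PKT_BURST);
                for (i = 0; i < nb_rx; i++)
                        rte_pktmbuf_free(burst[i]);
        } while (nb_rx > 0 && rte_rdtsc() < deadline);
}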
2270
2271static void
2272run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2273{
2274        struct fwd_stream **fsm;
2275        streamid_t nb_fs;
2276        streamid_t sm_id;
2277#ifdef RTE_LIB_BITRATESTATS
2278        uint64_t tics_per_1sec;
2279        uint64_t tics_datum;
2280        uint64_t tics_current;
2281        uint16_t i, cnt_ports;
2282
2283        cnt_ports = nb_ports;
2284        tics_datum = rte_rdtsc();
2285        tics_per_1sec = rte_get_timer_hz();
2286#endif
2287        fsm = &fwd_streams[fc->stream_idx];
2288        nb_fs = fc->stream_nb;
2289        do {
2290                for (sm_id = 0; sm_id < nb_fs; sm_id++)
2291                        if (!fsm[sm_id]->disabled)
2292                                (*pkt_fwd)(fsm[sm_id]);
2293#ifdef RTE_LIB_BITRATESTATS
2294                if (bitrate_enabled != 0 &&
2295                                bitrate_lcore_id == rte_lcore_id()) {
2296                        tics_current = rte_rdtsc();
2297                        if (tics_current - tics_datum >= tics_per_1sec) {
2298                                /* Periodic bitrate calculation */
2299                                for (i = 0; i < cnt_ports; i++)
2300                                        rte_stats_bitrate_calc(bitrate_data,
2301                                                ports_ids[i]);
2302                                tics_datum = tics_current;
2303                        }
2304                }
2305#endif
2306#ifdef RTE_LIB_LATENCYSTATS
2307                if (latencystats_enabled != 0 &&
2308                                latencystats_lcore_id == rte_lcore_id())
2309                        rte_latencystats_update();
2310#endif
2311
2312        } while (!fc->stopped);
2313}
2314
2315static int
2316start_pkt_forward_on_core(void *fwd_arg)
2317{
2318        run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2319                             cur_fwd_config.fwd_eng->packet_fwd);
2320        return 0;
2321}
2322
2323/*
2324 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2325 * Used to start communication flows in network loopback test configurations.
2326 */
2327static int
2328run_one_txonly_burst_on_core(void *fwd_arg)
2329{
2330        struct fwd_lcore *fwd_lc;
2331        struct fwd_lcore tmp_lcore;
2332
2333        fwd_lc = (struct fwd_lcore *) fwd_arg;
2334        tmp_lcore = *fwd_lc;
2335        tmp_lcore.stopped = 1;
2336        run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2337        return 0;
2338}
2339
2340/*
2341 * Launch packet forwarding:
2342 *     - Set up the per-port forwarding context.
2343 *     - Launch logical cores with their forwarding configuration.
2344 */
2345static void
2346launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2347{
2348        unsigned int i;
2349        unsigned int lc_id;
2350        int diag;
2351
2352        for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2353                lc_id = fwd_lcores_cpuids[i];
2354                if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2355                        fwd_lcores[i]->stopped = 0;
2356                        diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2357                                                     fwd_lcores[i], lc_id);
2358                        if (diag != 0)
2359                                fprintf(stderr,
2360                                        "launch lcore %u failed - diag=%d\n",
2361                                        lc_id, diag);
2362                }
2363        }
2364}
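
/*
 * Editorial sketch, not part of testpmd: the minimal EAL launch/join
 * pattern used above, shown in isolation. example_worker() and
 * example_launch_all() are hypothetical names.
 */
static int
example_worker(void *arg)
{
        (void)arg;
        printf("hello from lcore %u\n", rte_lcore_id());
        return 0;
}

static void
example_launch_all(void)
{
        unsigned int lc_id;

        /* launch on every worker lcore, then wait for all of them */
        RTE_LCORE_FOREACH_WORKER(lc_id)
                rte_eal_remote_launch(example_worker, NULL, lc_id);
        rte_eal_mp_wait_lcore();
}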
2365
2366/*
2367 * Launch packet forwarding configuration.
2368 */
2369void
2370start_packet_forwarding(int with_tx_first)
2371{
2372        port_fwd_begin_t port_fwd_begin;
2373        port_fwd_end_t  port_fwd_end;
2374        stream_init_t stream_init = cur_fwd_eng->stream_init;
2375        unsigned int i;
2376
2377        if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2378                rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
2379
2380        if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2381                rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
2382
2383        if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2384                strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2385                (!nb_rxq || !nb_txq))
2386                rte_exit(EXIT_FAILURE,
2387                        "Either rxq or txq is 0, cannot use %s fwd mode\n",
2388                        cur_fwd_eng->fwd_mode_name);
2389
2390        if (all_ports_started() == 0) {
2391                fprintf(stderr, "Not all ports were started\n");
2392                return;
2393        }
2394        if (test_done == 0) {
2395                fprintf(stderr, "Packet forwarding already started\n");
2396                return;
2397        }
2398
2399        fwd_config_setup();
2400
2401        pkt_fwd_config_display(&cur_fwd_config);
2402        if (!pkt_fwd_shared_rxq_check())
2403                return;
2404
2405        if (stream_init != NULL)
2406                for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
2407                        stream_init(fwd_streams[i]);
2408
2409        port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2410        if (port_fwd_begin != NULL) {
2411                for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2412                        if (port_fwd_begin(fwd_ports_ids[i])) {
2413                                fprintf(stderr,
2414                                        "Packet forwarding is not ready\n");
2415                                return;
2416                        }
2417                }
2418        }
2419
2420        if (with_tx_first) {
2421                port_fwd_begin = tx_only_engine.port_fwd_begin;
2422                if (port_fwd_begin != NULL) {
2423                        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2424                                if (port_fwd_begin(fwd_ports_ids[i])) {
2425                                        fprintf(stderr,
2426                                                "Packet forwarding is not ready\n");
2427                                        return;
2428                                }
2429                        }
2430                }
2431        }
2432
2433        test_done = 0;
2434
2435        if (!no_flush_rx)
2436                flush_fwd_rx_queues();
2437
2438        rxtx_config_display();
2439
2440        fwd_stats_reset();
2441        if (with_tx_first) {
2442                while (with_tx_first--) {
2443                        launch_packet_forwarding(
2444                                        run_one_txonly_burst_on_core);
2445                        rte_eal_mp_wait_lcore();
2446                }
2447                port_fwd_end = tx_only_engine.port_fwd_end;
2448                if (port_fwd_end != NULL) {
2449                        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2450                                (*port_fwd_end)(fwd_ports_ids[i]);
2451                }
2452        }
2453        launch_packet_forwarding(start_pkt_forward_on_core);
2454}
2455
2456void
2457stop_packet_forwarding(void)
2458{
2459        port_fwd_end_t port_fwd_end;
2460        lcoreid_t lc_id;
2461        portid_t pt_id;
2462        int i;
2463
2464        if (test_done) {
2465                fprintf(stderr, "Packet forwarding not started\n");
2466                return;
2467        }
2468        printf("Telling cores to stop...");
2469        for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2470                fwd_lcores[lc_id]->stopped = 1;
2471        printf("\nWaiting for lcores to finish...\n");
2472        rte_eal_mp_wait_lcore();
2473        port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2474        if (port_fwd_end != NULL) {
2475                for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2476                        pt_id = fwd_ports_ids[i];
2477                        (*port_fwd_end)(pt_id);
2478                }
2479        }
2480
2481        fwd_stats_display();
2482
2483        printf("\nDone.\n");
2484        test_done = 1;
2485}
2486
2487void
2488dev_set_link_up(portid_t pid)
2489{
2490        if (rte_eth_dev_set_link_up(pid) < 0)
2491                fprintf(stderr, "\nFailed to set link up.\n");
2492}
2493
2494void
2495dev_set_link_down(portid_t pid)
2496{
2497        if (rte_eth_dev_set_link_down(pid) < 0)
2498                fprintf(stderr, "\nFailed to set link down.\n");
2499}
2500
2501static int
2502all_ports_started(void)
2503{
2504        portid_t pi;
2505        struct rte_port *port;
2506
2507        RTE_ETH_FOREACH_DEV(pi) {
2508                port = &ports[pi];
2509                /* Check if there is a port which is not started */
2510                if ((port->port_status != RTE_PORT_STARTED) &&
2511                        (port->slave_flag == 0))
2512                        return 0;
2513        }
2514
2515        /* All ports are started */
2516        return 1;
2517}
2518
2519int
2520port_is_stopped(portid_t port_id)
2521{
2522        struct rte_port *port = &ports[port_id];
2523
2524        if ((port->port_status != RTE_PORT_STOPPED) &&
2525            (port->slave_flag == 0))
2526                return 0;
2527        return 1;
2528}
2529
2530int
2531all_ports_stopped(void)
2532{
2533        portid_t pi;
2534
2535        RTE_ETH_FOREACH_DEV(pi) {
2536                if (!port_is_stopped(pi))
2537                        return 0;
2538        }
2539
2540        return 1;
2541}
2542
2543int
2544port_is_started(portid_t port_id)
2545{
2546        if (port_id_is_invalid(port_id, ENABLED_WARN))
2547                return 0;
2548
2549        if (ports[port_id].port_status != RTE_PORT_STARTED)
2550                return 0;
2551
2552        return 1;
2553}
2554
2555/* Configure the Rx and Tx hairpin queues for the selected port. */
2556static int
2557setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2558{
2559        queueid_t qi;
2560        struct rte_eth_hairpin_conf hairpin_conf = {
2561                .peer_count = 1,
2562        };
2563        int i;
2564        int diag;
2565        struct rte_port *port = &ports[pi];
2566        uint16_t peer_rx_port = pi;
2567        uint16_t peer_tx_port = pi;
2568        uint32_t manual = 1;
2569        uint32_t tx_exp = hairpin_mode & 0x10;
2570
2571        if (!(hairpin_mode & 0xf)) {
2572                peer_rx_port = pi;
2573                peer_tx_port = pi;
2574                manual = 0;
2575        } else if (hairpin_mode & 0x1) {
2576                peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2577                                                       RTE_ETH_DEV_NO_OWNER);
2578                if (peer_tx_port >= RTE_MAX_ETHPORTS)
2579                        peer_tx_port = rte_eth_find_next_owned_by(0,
2580                                                RTE_ETH_DEV_NO_OWNER);
2581                if (p_pi != RTE_MAX_ETHPORTS) {
2582                        peer_rx_port = p_pi;
2583                } else {
2584                        uint16_t next_pi;
2585
2586                        /* Last port will be the peer RX port of the first. */
2587                        RTE_ETH_FOREACH_DEV(next_pi)
2588                                peer_rx_port = next_pi;
2589                }
2590                manual = 1;
2591        } else if (hairpin_mode & 0x2) {
2592                if (cnt_pi & 0x1) {
2593                        peer_rx_port = p_pi;
2594                } else {
2595                        peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2596                                                RTE_ETH_DEV_NO_OWNER);
2597                        if (peer_rx_port >= RTE_MAX_ETHPORTS)
2598                                peer_rx_port = pi;
2599                }
2600                peer_tx_port = peer_rx_port;
2601                manual = 1;
2602        }
2603
2604        for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2605                hairpin_conf.peers[0].port = peer_rx_port;
2606                hairpin_conf.peers[0].queue = i + nb_rxq;
2607                hairpin_conf.manual_bind = !!manual;
2608                hairpin_conf.tx_explicit = !!tx_exp;
2609                diag = rte_eth_tx_hairpin_queue_setup
2610                        (pi, qi, nb_txd, &hairpin_conf);
2611                i++;
2612                if (diag == 0)
2613                        continue;
2614
2615                /* Failed to set up Tx hairpin queue, return */
2616                if (port->port_status == RTE_PORT_HANDLING)
2617                        port->port_status = RTE_PORT_STOPPED;
2618                else
2619                        fprintf(stderr,
2620                                "Port %d cannot be set back to stopped\n", pi);
2621                fprintf(stderr, "Failed to configure port %d hairpin queues\n",
2622                        pi);
2623                /* try to reconfigure queues next time */
2624                port->need_reconfig_queues = 1;
2625                return -1;
2626        }
2627        for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2628                hairpin_conf.peers[0].port = peer_tx_port;
2629                hairpin_conf.peers[0].queue = i + nb_txq;
2630                hairpin_conf.manual_bind = !!manual;
2631                hairpin_conf.tx_explicit = !!tx_exp;
2632                diag = rte_eth_rx_hairpin_queue_setup
2633                        (pi, qi, nb_rxd, &hairpin_conf);
2634                i++;
2635                if (diag == 0)
2636                        continue;
2637
2638                /* Failed to set up Rx hairpin queue, return */
2639                if (port->port_status == RTE_PORT_HANDLING)
2640                        port->port_status = RTE_PORT_STOPPED;
2641                else
2642                        fprintf(stderr,
2643                                "Port %d cannot be set back to stopped\n", pi);
2644                fprintf(stderr, "Failed to configure port %d hairpin queues\n",
2645                        pi);
2646                /* try to reconfigure queues next time */
2647                port->need_reconfig_queues = 1;
2648                return -1;
2649        }
2650        return 0;
2651}
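
/*
 * Editorial sketch, not part of testpmd: the simplest hairpin case
 * handled above (hairpin_mode == 0), binding one Tx hairpin queue back
 * to an Rx hairpin queue of the same port. The descriptor count of 1024
 * is illustrative and example_hairpin_loopback() is a hypothetical name.
 */
static int
example_hairpin_loopback(uint16_t port_id, uint16_t txq_id, uint16_t rxq_id)
{
        struct rte_eth_hairpin_conf conf = {
                .peer_count = 1,
        };

        conf.peers[0].port = port_id;   /* same-port loopback */
        conf.peers[0].queue = rxq_id;
        return rte_eth_tx_hairpin_queue_setup(port_id, txq_id, 1024, &conf);
}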
2652
2653/* Configure the Rx queue with an optional buffer split. */
2654int
2655rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2656               uint16_t nb_rx_desc, unsigned int socket_id,
2657               struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2658{
2659        union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2660        unsigned int i, mp_n;
2661        int ret;
2662
2663        if (rx_pkt_nb_segs <= 1 ||
2664            (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2665                rx_conf->rx_seg = NULL;
2666                rx_conf->rx_nseg = 0;
2667                ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2668                                             nb_rx_desc, socket_id,
2669                                             rx_conf, mp);
2670                goto exit;
2671        }
2672        for (i = 0; i < rx_pkt_nb_segs; i++) {
2673                struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2674                struct rte_mempool *mpx;
2675                /*
2676                 * Use last valid pool for the segments with number
2677                 * exceeding the pool index.
2678                 */
2679                mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2680                mpx = mbuf_pool_find(socket_id, mp_n);
2681                /* Handle zero as mbuf data buffer size. */
2682                rx_seg->length = rx_pkt_seg_lengths[i] ?
2683                                   rx_pkt_seg_lengths[i] :
2684                                   mbuf_data_size[mp_n];
2685                rx_seg->offset = i < rx_pkt_nb_offs ?
2686                                   rx_pkt_seg_offsets[i] : 0;
2687                rx_seg->mp = mpx ? mpx : mp;
2688        }
2689        rx_conf->rx_nseg = rx_pkt_nb_segs;
2690        rx_conf->rx_seg = rx_useg;
2691        ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2692                                    socket_id, rx_conf, NULL);
2693        rx_conf->rx_seg = NULL;
2694        rx_conf->rx_nseg = 0;
2695exit:
2696        ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
2697                                                RTE_ETH_QUEUE_STATE_STOPPED :
2698                                                RTE_ETH_QUEUE_STATE_STARTED;
2699        return ret;
2700}
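
/*
 * Editorial sketch, not part of testpmd: filling a two-segment
 * buffer-split array by hand, as the loop above does generically. The
 * pools and lengths are illustrative, and the split takes effect only
 * when RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is enabled in rx_conf->offloads.
 */
static void
example_fill_two_segs(union rte_eth_rxseg *segs,
                      struct rte_mempool *hdr_pool,
                      struct rte_mempool *data_pool)
{
        segs[0].split.mp = hdr_pool;    /* e.g. a small pool for headers */
        segs[0].split.length = 128;
        segs[0].split.offset = 0;
        segs[1].split.mp = data_pool;   /* remainder of the packet */
        segs[1].split.length = 2048;
        segs[1].split.offset = 0;
}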
2701
2702static int
2703alloc_xstats_display_info(portid_t pi)
2704{
2705        uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
2706        uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
2707        uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
2708
2709        if (xstats_display_num == 0)
2710                return 0;
2711
2712        *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
2713        if (*ids_supp == NULL)
2714                goto fail_ids_supp;
2715
2716        *prev_values = calloc(xstats_display_num,
2717                              sizeof(**prev_values));
2718        if (*prev_values == NULL)
2719                goto fail_prev_values;
2720
2721        *curr_values = calloc(xstats_display_num,
2722                              sizeof(**curr_values));
2723        if (*curr_values == NULL)
2724                goto fail_curr_values;
2725
2726        ports[pi].xstats_info.allocated = true;
2727
2728        return 0;
2729
2730fail_curr_values:
2731        free(*prev_values);
2732fail_prev_values:
2733        free(*ids_supp);
2734fail_ids_supp:
2735        return -ENOMEM;
2736}
2737
2738static void
2739free_xstats_display_info(portid_t pi)
2740{
2741        if (!ports[pi].xstats_info.allocated)
2742                return;
2743        free(ports[pi].xstats_info.ids_supp);
2744        free(ports[pi].xstats_info.prev_values);
2745        free(ports[pi].xstats_info.curr_values);
2746        ports[pi].xstats_info.allocated = false;
2747}
2748
2749/** Fill helper structures for specified port to show extended statistics. */
2750static void
2751fill_xstats_display_info_for_port(portid_t pi)
2752{
2753        unsigned int stat, stat_supp;
2754        const char *xstat_name;
2755        struct rte_port *port;
2756        uint64_t *ids_supp;
2757        int rc;
2758
2759        if (xstats_display_num == 0)
2760                return;
2761
2762        if (pi == (portid_t)RTE_PORT_ALL) {
2763                fill_xstats_display_info();
2764                return;
2765        }
2766
2767        port = &ports[pi];
2768        if (port->port_status != RTE_PORT_STARTED)
2769                return;
2770
2771        if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
2772                rte_exit(EXIT_FAILURE,
2773                         "Failed to allocate xstats display memory\n");
2774
2775        ids_supp = port->xstats_info.ids_supp;
2776        for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
2777                xstat_name = xstats_display[stat].name;
2778                rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
2779                                                   ids_supp + stat_supp);
2780                if (rc != 0) {
2781                        fprintf(stderr, "No xstat '%s' on port %u - skipping (index %u)\n",
2782                                xstat_name, pi, stat);
2783                        continue;
2784                }
2785                stat_supp++;
2786        }
2787
2788        port->xstats_info.ids_supp_sz = stat_supp;
2789}
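
/*
 * Editorial sketch, not part of testpmd: once ids_supp is filled as
 * above, the display path can fetch just those counters. Assumes the
 * values array has at least ids_supp_sz elements; example_read_xstats()
 * is a hypothetical name.
 */
static int
example_read_xstats(portid_t pi, uint64_t *values)
{
        struct rte_port *port = &ports[pi];

        /* returns the number of values filled, or a negative error */
        return rte_eth_xstats_get_by_id(pi, port->xstats_info.ids_supp,
                                        values,
                                        port->xstats_info.ids_supp_sz);
}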
2790
2791/** Fill helper structures for all ports to show extended statistics. */
2792static void
2793fill_xstats_display_info(void)
2794{
2795        portid_t pi;
2796
2797        if (xstats_display_num == 0)
2798                return;
2799
2800        RTE_ETH_FOREACH_DEV(pi)
2801                fill_xstats_display_info_for_port(pi);
2802}
2803
2804int
2805start_port(portid_t pid)
2806{
2807        int diag, need_check_link_status = -1;
2808        portid_t pi;
2809        portid_t p_pi = RTE_MAX_ETHPORTS;
2810        portid_t pl[RTE_MAX_ETHPORTS];
2811        portid_t peer_pl[RTE_MAX_ETHPORTS];
2812        uint16_t cnt_pi = 0;
2813        uint16_t cfg_pi = 0;
2814        int peer_pi;
2815        queueid_t qi;
2816        struct rte_port *port;
2817        struct rte_eth_hairpin_cap cap;
2818
2819        if (port_id_is_invalid(pid, ENABLED_WARN))
2820                return 0;
2821
2822        RTE_ETH_FOREACH_DEV(pi) {
2823                if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2824                        continue;
2825
2826                if (port_is_bonding_slave(pi)) {
2827                        fprintf(stderr,
2828                                "Please remove port %d from bonded device.\n",
2829                                pi);
2830                        continue;
2831                }
2832
2833                need_check_link_status = 0;
2834                port = &ports[pi];
2835                if (port->port_status == RTE_PORT_STOPPED)
2836                        port->port_status = RTE_PORT_HANDLING;
2837                else {
2838                        fprintf(stderr, "Port %d is not stopped\n", pi);
2839                        continue;
2840                }
2841
2842                if (port->need_reconfig > 0) {
2843                        struct rte_eth_conf dev_conf;
2844                        int k;
2845
2846                        port->need_reconfig = 0;
2847
2848                        if (flow_isolate_all) {
2849                                int ret = port_flow_isolate(pi, 1);
2850                                if (ret) {
2851                                        fprintf(stderr,
2852                                                "Failed to apply isolated mode on port %d\n",
2853                                                pi);
2854                                        return -1;
2855                                }
2856                        }
2857                        configure_rxtx_dump_callbacks(0);
2858                        printf("Configuring Port %d (socket %u)\n", pi,
2859                                        port->socket_id);
2860                        if (nb_hairpinq > 0 &&
2861                            rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2862                                fprintf(stderr,
2863                                        "Port %d doesn't support hairpin queues\n",
2864                                        pi);
2865                                return -1;
2866                        }
2867
2868                        /* configure port */
2869                        diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
2870                                                     nb_txq + nb_hairpinq,
2871                                                     &(port->dev_conf));
2872                        if (diag != 0) {
2873                                if (port->port_status == RTE_PORT_HANDLING)
2874                                        port->port_status = RTE_PORT_STOPPED;
2875                                else
2876                                        fprintf(stderr,
2877                                                "Port %d cannot be set back to stopped\n",
2878                                                pi);
2879                                fprintf(stderr, "Failed to configure port %d\n",
2880                                        pi);
2881                                /* try to reconfigure port next time */
2882                                port->need_reconfig = 1;
2883                                return -1;
2884                        }
2885                        /* get device configuration */
2886                        if (eth_dev_conf_get_print_err(pi,
2887                                                       &dev_conf) != 0) {
2888                                fprintf(stderr,
2889                                        "Port %d cannot get device configuration\n",
2890                                        pi);
2891                                return -1;
2892                        }
2893                        /* Apply Rx offloads configuration */
2894                        if (dev_conf.rxmode.offloads !=
2895                            port->dev_conf.rxmode.offloads) {
2896                                port->dev_conf.rxmode.offloads |=
2897                                        dev_conf.rxmode.offloads;
2898                                for (k = 0;
2899                                     k < port->dev_info.max_rx_queues;
2900                                     k++)
2901                                        port->rxq[k].conf.offloads |=
2902                                                dev_conf.rxmode.offloads;
2903                        }
2904                        /* Apply Tx offloads configuration */
2905                        if (dev_conf.txmode.offloads !=
2906                            port->dev_conf.txmode.offloads) {
2907                                port->dev_conf.txmode.offloads |=
2908                                        dev_conf.txmode.offloads;
2909                                for (k = 0;
2910                                     k < port->dev_info.max_tx_queues;
2911                                     k++)
2912                                        port->txq[k].conf.offloads |=
2913                                                dev_conf.txmode.offloads;
2914                        }
2915                }
2916                if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2917                        port->need_reconfig_queues = 0;
2918                        /* setup tx queues */
2919                        for (qi = 0; qi < nb_txq; qi++) {
2920                                struct rte_eth_txconf *conf =
2921                                                        &port->txq[qi].conf;
2922
2923                                if ((numa_support) &&
2924                                        (txring_numa[pi] != NUMA_NO_CONFIG))
2925                                        diag = rte_eth_tx_queue_setup(pi, qi,
2926                                                port->nb_tx_desc[qi],
2927                                                txring_numa[pi],
2928                                                &(port->txq[qi].conf));
2929                                else
2930                                        diag = rte_eth_tx_queue_setup(pi, qi,
2931                                                port->nb_tx_desc[qi],
2932                                                port->socket_id,
2933                                                &(port->txq[qi].conf));
2934
2935                                if (diag == 0) {
2936                                        port->txq[qi].state =
2937                                                conf->tx_deferred_start ?
2938                                                RTE_ETH_QUEUE_STATE_STOPPED :
2939                                                RTE_ETH_QUEUE_STATE_STARTED;
2940                                        continue;
2941                                }
2942
2943                                /* Failed to set up Tx queue, return */
2944                                if (port->port_status == RTE_PORT_HANDLING)
2945                                        port->port_status = RTE_PORT_STOPPED;
2946                                else
2947                                        fprintf(stderr,
2948                                                "Port %d cannot be set back to stopped\n",
2949                                                pi);
2950                                fprintf(stderr,
2951                                        "Failed to configure port %d Tx queues\n",
2952                                        pi);
2953                                /* try to reconfigure queues next time */
2954                                port->need_reconfig_queues = 1;
2955                                return -1;
2956                        }
2957                        for (qi = 0; qi < nb_rxq; qi++) {
2958                                /* setup rx queues */
2959                                if ((numa_support) &&
2960                                        (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2961                                        struct rte_mempool * mp =
2962                                                mbuf_pool_find
2963                                                        (rxring_numa[pi], 0);
2964                                        if (mp == NULL) {
2965                                                fprintf(stderr,
2966                                                        "Failed to set up Rx queue: no mempool allocated on socket %d\n",
2967                                                        rxring_numa[pi]);
2968                                                return -1;
2969                                        }
2970
2971                                        diag = rx_queue_setup(pi, qi,
2972                                             port->nb_rx_desc[qi],
2973                                             rxring_numa[pi],
2974                                             &(port->rxq[qi].conf),
2975                                             mp);
2976                                } else {
2977                                        struct rte_mempool *mp =
2978                                                mbuf_pool_find
2979                                                        (port->socket_id, 0);
2980                                        if (mp == NULL) {
2981                                                fprintf(stderr,
2982                                                        "Failed to set up Rx queue: no mempool allocated on socket %d\n",
2983                                                        port->socket_id);
2984                                                return -1;
2985                                        }
2986                                        diag = rx_queue_setup(pi, qi,
2987                                             port->nb_rx_desc[qi],
2988                                             port->socket_id,
2989                                             &(port->rxq[qi].conf),
2990                                             mp);
2991                                }
2992                                if (diag == 0)
2993                                        continue;
2994
2995                                /* Failed to set up Rx queue, return */
2996                                if (port->port_status == RTE_PORT_HANDLING)
2997                                        port->port_status = RTE_PORT_STOPPED;
2998                                else
2999                                        fprintf(stderr,
3000                                                "Port %d cannot be set back to stopped\n",
3001                                                pi);
3002                                fprintf(stderr,
3003                                        "Failed to configure port %d Rx queues\n",
3004                                        pi);
3005                                /* try to reconfigure queues next time */
3006                                port->need_reconfig_queues = 1;
3007                                return -1;
3008                        }
3009                        /* setup hairpin queues */
3010                        if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
3011                                return -1;
3012                }
3013                configure_rxtx_dump_callbacks(verbose_level);
3014                if (clear_ptypes) {
3015                        diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3016                                        NULL, 0);
3017                        if (diag < 0)
3018                                fprintf(stderr,
3019                                        "Port %d: Failed to disable Ptype parsing\n",
3020                                        pi);
3021                }
3022
3023                p_pi = pi;
3024                cnt_pi++;
3025
3026                /* start port */
3027                diag = eth_dev_start_mp(pi);
3028                if (diag < 0) {
3029                        fprintf(stderr, "Fail to start port %d: %s\n",
3030                                pi, rte_strerror(-diag));
3031
3032                        /* Failed to start the port; set it back to stopped and skip it */
3033                        if (port->port_status == RTE_PORT_HANDLING)
3034                                port->port_status = RTE_PORT_STOPPED;
3035                        else
3036                                fprintf(stderr,
3037                                        "Port %d cannot be set back to stopped\n",
3038                                        pi);
3039                        continue;
3040                }
3041
3042                if (port->port_status == RTE_PORT_HANDLING)
3043                        port->port_status = RTE_PORT_STARTED;
3044                else
3045                        fprintf(stderr, "Port %d cannot be set to started\n",
3046                                pi);
3047
3048                if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3049                        printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3050                                        RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3051
3052                /* At least one port was started, so link status must be checked */
3053                need_check_link_status = 1;
3054
3055                pl[cfg_pi++] = pi;
3056        }
3057
3058        if (need_check_link_status == 1 && !no_link_check)
3059                check_all_ports_link_status(RTE_PORT_ALL);
3060        else if (need_check_link_status == 0)
3061                fprintf(stderr, "Please stop the ports first\n");
3062
3063        if (hairpin_mode & 0xf) {
3064                uint16_t i;
3065                int j;
3066
3067                /* bind all started hairpin ports */
3068                for (i = 0; i < cfg_pi; i++) {
3069                        pi = pl[i];
3070                        /* bind current Tx to all peer Rx */
3071                        peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3072                                                        RTE_MAX_ETHPORTS, 1);
3073                        if (peer_pi < 0)
3074                                return peer_pi;
3075                        for (j = 0; j < peer_pi; j++) {
3076                                if (!port_is_started(peer_pl[j]))
3077                                        continue;
3078                                diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
3079                                if (diag < 0) {
3080                                        fprintf(stderr,
3081                                                "Error during binding hairpin Tx port %u to %u: %s\n",
3082                                                pi, peer_pl[j],
3083                                                rte_strerror(-diag));
3084                                        return -1;
3085                                }
3086                        }
3087                        /* bind all peer Tx to current Rx */
3088                        peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3089                                                        RTE_MAX_ETHPORTS, 0);
3090                        if (peer_pi < 0)
3091                                return peer_pi;
3092                        for (j = 0; j < peer_pi; j++) {
3093                                if (!port_is_started(peer_pl[j]))
3094                                        continue;
3095                                diag = rte_eth_hairpin_bind(peer_pl[j], pi);
3096                                if (diag < 0) {
3097                                        fprintf(stderr,
3098                                                "Error during binding hairpin Tx port %u to %u: %s\n",
3099                                                peer_pl[j], pi,
3100                                                rte_strerror(-diag));
3101                                        return -1;
3102                                }
3103                        }
3104                }
3105        }
3106
3107        fill_xstats_display_info_for_port(pid);
3108
3109        printf("Done\n");
3110        return 0;
3111}
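
/*
 * Editor's illustrative sketch, not part of testpmd: the bare ethdev
 * bring-up sequence that start_port() wraps with reconfiguration and
 * state tracking. The descriptor count and mempool here are
 * caller-provided assumptions, not values taken from testpmd.
 */
static __rte_unused int
example_minimal_port_start(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));

        /* One Rx and one Tx queue with default device configuration. */
        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret < 0)
                return ret;

        /* A NULL queue conf selects the driver defaults. */
        ret = rte_eth_rx_queue_setup(port_id, 0, 512,
                                     rte_eth_dev_socket_id(port_id),
                                     NULL, mb_pool);
        if (ret < 0)
                return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 512,
                                     rte_eth_dev_socket_id(port_id), NULL);
        if (ret < 0)
                return ret;

        return rte_eth_dev_start(port_id);
}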
3112
3113void
3114stop_port(portid_t pid)
3115{
3116        portid_t pi;
3117        struct rte_port *port;
3118        int need_check_link_status = 0;
3119        portid_t peer_pl[RTE_MAX_ETHPORTS];
3120        int peer_pi;
3121
3122        if (port_id_is_invalid(pid, ENABLED_WARN))
3123                return;
3124
3125        printf("Stopping ports...\n");
3126
3127        RTE_ETH_FOREACH_DEV(pi) {
3128                if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3129                        continue;
3130
3131                if (port_is_forwarding(pi) != 0 && test_done == 0) {
3132                        fprintf(stderr,
3133                                "Please remove port %d from forwarding configuration.\n",
3134                                pi);
3135                        continue;
3136                }
3137
3138                if (port_is_bonding_slave(pi)) {
3139                        fprintf(stderr,
3140                                "Please remove port %d from bonded device.\n",
3141                                pi);
3142                        continue;
3143                }
3144
3145                port = &ports[pi];
3146                if (port->port_status == RTE_PORT_STARTED)
3147                        port->port_status = RTE_PORT_HANDLING;
3148                else
3149                        continue;
3150
3151                if (hairpin_mode & 0xf) {
3152                        int j;
3153
3154                        rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
3155                        /* unbind all peer Tx from current Rx */
3156                        peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3157                                                        RTE_MAX_ETHPORTS, 0);
3158                        if (peer_pi < 0)
3159                                continue;
3160                        for (j = 0; j < peer_pi; j++) {
3161                                if (!port_is_started(peer_pl[j]))
3162                                        continue;
3163                                rte_eth_hairpin_unbind(peer_pl[j], pi);
3164                        }
3165                }
3166
3167                if (port->flow_list)
3168                        port_flow_flush(pi);
3169
3170                if (eth_dev_stop_mp(pi) != 0)
3171                        RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3172                                pi);
3173
3174                if (port->port_status == RTE_PORT_HANDLING)
3175                        port->port_status = RTE_PORT_STOPPED;
3176                else
3177                        fprintf(stderr, "Port %d cannot be set to stopped\n",
3178                                pi);
3179                need_check_link_status = 1;
3180        }
3181        if (need_check_link_status && !no_link_check)
3182                check_all_ports_link_status(RTE_PORT_ALL);
3183
3184        printf("Done\n");
3185}
3186
3187static void
3188remove_invalid_ports_in(portid_t *array, portid_t *total)
3189{
3190        portid_t i;
3191        portid_t new_total = 0;
3192
3193        for (i = 0; i < *total; i++)
3194                if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
3195                        array[new_total] = array[i];
3196                        new_total++;
3197                }
3198        *total = new_total;
3199}
3200
3201static void
3202remove_invalid_ports(void)
3203{
3204        remove_invalid_ports_in(ports_ids, &nb_ports);
3205        remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
3206        nb_cfg_ports = nb_fwd_ports;
3207}
3208
3209static void
3210flush_port_owned_resources(portid_t pi)
3211{
3212        mcast_addr_pool_destroy(pi);
3213        port_flow_flush(pi);
3214        port_flex_item_flush(pi);
3215        port_action_handle_flush(pi);
3216}
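
/*
 * Editor's illustrative sketch, not part of testpmd: the rte_flow side of
 * the cleanup above reduces to rte_flow_flush(), which destroys every flow
 * rule on the port and fills in a detailed error on failure.
 */
static __rte_unused void
example_flow_flush(uint16_t port_id)
{
        struct rte_flow_error error;

        memset(&error, 0, sizeof(error));
        if (rte_flow_flush(port_id, &error) != 0)
                fprintf(stderr, "Flow flush failed on port %u: %s\n",
                        port_id,
                        error.message ? error.message : "(no message)");
}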
3217
3218static void
3219clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves)
3220{
3221        struct rte_port *port;
3222        portid_t slave_pid;
3223        uint16_t i;
3224
3225        for (i = 0; i < num_slaves; i++) {
3226                slave_pid = slave_pids[i];
3227                if (port_is_started(slave_pid) == 1) {
3228                        if (rte_eth_dev_stop(slave_pid) != 0)
3229                                fprintf(stderr, "rte_eth_dev_stop failed for port %u\n",
3230                                        slave_pid);
3231
3232                        port = &ports[slave_pid];
3233                        port->port_status = RTE_PORT_STOPPED;
3234                }
3235
3236                clear_port_slave_flag(slave_pid);
3237
3238                /* Close the slave device when testpmd quits or is killed. */
3239                if (cl_quit == 1 || f_quit == 1)
3240                        rte_eth_dev_close(slave_pid);
3241        }
3242}
3243
3244void
3245close_port(portid_t pid)
3246{
3247        portid_t pi;
3248        struct rte_port *port;
3249        portid_t slave_pids[RTE_MAX_ETHPORTS];
3250        int num_slaves = 0;
3251
3252        if (port_id_is_invalid(pid, ENABLED_WARN))
3253                return;
3254
3255        printf("Closing ports...\n");
3256
3257        RTE_ETH_FOREACH_DEV(pi) {
3258                if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3259                        continue;
3260
3261                if (port_is_forwarding(pi) != 0 && test_done == 0) {
3262                        fprintf(stderr,
3263                                "Please remove port %d from forwarding configuration.\n",
3264                                pi);
3265                        continue;
3266                }
3267
3268                if (port_is_bonding_slave(pi)) {
3269                        fprintf(stderr,
3270                                "Please remove port %d from bonded device.\n",
3271                                pi);
3272                        continue;
3273                }
3274
3275                port = &ports[pi];
3276                if (port->port_status == RTE_PORT_CLOSED) {
3277                        fprintf(stderr, "Port %d is already closed\n", pi);
3278                        continue;
3279                }
3280
3281                if (is_proc_primary()) {
3282                        flush_port_owned_resources(pi);
3283#ifdef RTE_NET_BOND
3284                        if (port->bond_flag == 1)
3285                                num_slaves = rte_eth_bond_slaves_get(pi,
3286                                                slave_pids, RTE_MAX_ETHPORTS);
3287#endif
3288                        rte_eth_dev_close(pi);
3289                        /*
3290                         * If this port is a bonded device, all slave ports
3291                         * under it need to be removed or closed.
3292                         */
3293                        if (port->bond_flag == 1 && num_slaves > 0)
3294                                clear_bonding_slave_device(slave_pids,
3295                                                        num_slaves);
3296                }
3297
3298                free_xstats_display_info(pi);
3299        }
3300
3301        remove_invalid_ports();
3302        printf("Done\n");
3303}
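
/*
 * Editor's illustrative sketch, not part of testpmd: the teardown order
 * close_port() relies on. A port must be stopped before it is closed;
 * recent ethdev versions reject closing a started port.
 */
static __rte_unused void
example_port_teardown(uint16_t port_id)
{
        if (rte_eth_dev_stop(port_id) != 0)
                fprintf(stderr, "Failed to stop port %u\n", port_id);
        if (rte_eth_dev_close(port_id) != 0)
                fprintf(stderr, "Failed to close port %u\n", port_id);
}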
3304
3305void
3306reset_port(portid_t pid)
3307{
3308        int diag;
3309        portid_t pi;
3310        struct rte_port *port;
3311
3312        if (port_id_is_invalid(pid, ENABLED_WARN))
3313                return;
3314
3315        if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
3316                (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
3317                fprintf(stderr,
3318                        "Cannot reset port(s); please stop the port(s) first.\n");
3319                return;
3320        }
3321
3322        printf("Resetting ports...\n");
3323
3324        RTE_ETH_FOREACH_DEV(pi) {
3325                if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3326                        continue;
3327
3328                if (port_is_forwarding(pi) != 0 && test_done == 0) {
3329                        fprintf(stderr,
3330                                "Please remove port %d from forwarding configuration.\n",
3331                                pi);
3332                        continue;
3333                }
3334
3335                if (port_is_bonding_slave(pi)) {
3336                        fprintf(stderr,
3337                                "Please remove port %d from bonded device.\n",
3338                                pi);
3339                        continue;
3340                }
3341
3342                diag = rte_eth_dev_reset(pi);
3343                if (diag == 0) {
3344                        port = &ports[pi];
3345                        port->need_reconfig = 1;
3346                        port->need_reconfig_queues = 1;
3347                } else {
3348                        fprintf(stderr, "Failed to reset port %d. diag=%d\n",
3349                                pi, diag);
3350                }
3351        }
3352
3353        printf("Done\n");
3354}
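
/*
 * Editor's illustrative sketch, not part of testpmd: rte_eth_dev_reset()
 * leaves the port in an unconfigured state, so the application has to
 * configure it again (and redo queue setup) before restarting it, which
 * is why reset_port() raises the need_reconfig flags above.
 */
static __rte_unused int
example_reset_then_reconfigure(uint16_t port_id,
                               const struct rte_eth_conf *conf,
                               uint16_t nb_rx_q, uint16_t nb_tx_q)
{
        int ret;

        ret = rte_eth_dev_reset(port_id);
        if (ret != 0)
                return ret;
        /* Queue setup and rte_eth_dev_start() would follow this call. */
        return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q, conf);
}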
3355
3356void
3357attach_port(char *identifier)
3358{
3359        portid_t pi;
3360        struct rte_dev_iterator iterator;
3361
3362        printf("Attaching a new port...\n");
3363
3364        if (identifier == NULL) {
3365                fprintf(stderr, "No device identifier specified\n");
3366                return;
3367        }
3368
3369        if (rte_dev_probe(identifier) < 0) {
3370                TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3371                return;
3372        }
3373
3374        /* first attach mode: event */
3375        if (setup_on_probe_event) {
3376                /* new ports are detected on RTE_ETH_EVENT_NEW event */
3377                for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
3378                        if (ports[pi].port_status == RTE_PORT_HANDLING &&
3379                                        ports[pi].need_setup != 0)
3380                                setup_attached_port(pi);
3381                return;
3382        }
3383
3384        /* second attach mode: iterator */
3385        RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
3386                /* setup ports matching the devargs used for probing */
3387                if (port_is_forwarding(pi))
3388                        continue; /* port was already attached before */
3389                setup_attached_port(pi);
3390        }
3391}
3392
3393static void
3394setup_attached_port(portid_t pi)
3395{
3396        unsigned int socket_id;
3397        int ret;
3398
3399        socket_id = (unsigned)rte_eth_dev_socket_id(pi);
3400        /* if socket_id is invalid, set to the first available socket. */
3401        if (check_socket_id(socket_id) < 0)
3402                socket_id = socket_ids[0];
3403        reconfig(pi, socket_id);
3404        ret = rte_eth_promiscuous_enable(pi);
3405        if (ret != 0)
3406                fprintf(stderr,
3407                        "Error during enabling promiscuous mode for port %u: %s - ignore\n",
3408                        pi, rte_strerror(-ret));
3409
3410        ports_ids[nb_ports++] = pi;
3411        fwd_ports_ids[nb_fwd_ports++] = pi;
3412        nb_cfg_ports = nb_fwd_ports;
3413        ports[pi].need_setup = 0;
3414        ports[pi].port_status = RTE_PORT_STOPPED;
3415
3416        printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
3417        printf("Done\n");
3418}
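
/*
 * Editor's illustrative sketch, not part of testpmd: attaching a port is
 * a single devargs probe; the PCI address used here is a hypothetical
 * example. On success the new port shows up via RTE_ETH_EVENT_NEW or the
 * matching-device iterator, as handled in attach_port() above.
 */
static __rte_unused void
example_hotplug_attach(void)
{
        const char *devargs = "0000:03:00.0"; /* hypothetical address */

        if (rte_dev_probe(devargs) < 0)
                fprintf(stderr, "Probe of %s failed\n", devargs);
}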
3419
3420static void
3421detach_device(struct rte_device *dev)
3422{
3423        portid_t sibling;
3424
3425        if (dev == NULL) {
3426                fprintf(stderr, "Device already removed\n");
3427                return;
3428        }
3429
3430        printf("Removing a device...\n");
3431
3432        RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3433                if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3434                        if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3435                                fprintf(stderr, "Port %u not stopped\n",
3436                                        sibling);
3437                                return;
3438                        }
3439                        flush_port_owned_resources(sibling);
3440                }
3441        }
3442
3443        if (rte_dev_remove(dev) < 0) {
3444                TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3445                return;
3446        }
3447        remove_invalid_ports();
3448
3449        printf("Device is detached\n");
3450        printf("Total number of ports is now %d\n", nb_ports);
3451        printf("Done\n");
3452        return;
3453}
3454
3455void
3456detach_port_device(portid_t port_id)
3457{
3458        int ret;
3459        struct rte_eth_dev_info dev_info;
3460
3461        if (port_id_is_invalid(port_id, ENABLED_WARN))
3462                return;
3463
3464        if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3465                if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3466                        fprintf(stderr, "Port not stopped\n");
3467                        return;
3468                }
3469                fprintf(stderr, "Port was not closed\n");
3470        }
3471
3472        ret = eth_dev_info_get_print_err(port_id, &dev_info);
3473        if (ret != 0) {
3474                TESTPMD_LOG(ERR,
3475                        "Failed to get device info for port %d, not detaching\n",
3476                        port_id);
3477                return;
3478        }
3479        detach_device(dev_info.device);
3480}
3481
3482void
3483detach_devargs(char *identifier)
3484{
3485        struct rte_dev_iterator iterator;
3486        struct rte_devargs da;
3487        portid_t port_id;
3488
3489        printf("Removing a device...\n");
3490
3491        memset(&da, 0, sizeof(da));
3492        if (rte_devargs_parsef(&da, "%s", identifier)) {
3493                fprintf(stderr, "Cannot parse identifier\n");
3494                return;
3495        }
3496
3497        RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3498                if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3499                        if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3500                                fprintf(stderr, "Port %u not stopped\n",
3501                                        port_id);
3502                                rte_eth_iterator_cleanup(&iterator);
3503                                rte_devargs_reset(&da);
3504                                return;
3505                        }
3506                        flush_port_owned_resources(port_id);
3507                }
3508        }
3509
3510        if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3511                TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3512                            da.name, da.bus->name);
3513                rte_devargs_reset(&da);
3514                return;
3515        }
3516
3517        remove_invalid_ports();
3518
3519        printf("Device %s is detached\n", identifier);
3520        printf("Total number of ports is now %d\n", nb_ports);
3521        printf("Done\n");
3522        rte_devargs_reset(&da);
3523}
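
/*
 * Editor's illustrative sketch, not part of testpmd: the parse/remove pair
 * used by detach_devargs() in isolation. rte_devargs_reset() releases the
 * strings allocated by parsing and must run on every exit path.
 */
static __rte_unused void
example_hotplug_detach(const char *identifier)
{
        struct rte_devargs da;

        memset(&da, 0, sizeof(da));
        if (rte_devargs_parsef(&da, "%s", identifier) != 0)
                return;
        if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0)
                fprintf(stderr, "Failed to remove %s\n", da.name);
        rte_devargs_reset(&da);
}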
3524
3525void
3526pmd_test_exit(void)
3527{
3528        portid_t pt_id;
3529        unsigned int i;
3530        int ret;
3531
3532        if (test_done == 0)
3533                stop_packet_forwarding();
3534
3535#ifndef RTE_EXEC_ENV_WINDOWS
3536        for (i = 0; i < RTE_DIM(mempools); i++) {
3537                if (mempools[i]) {
3538                        if (mp_alloc_type == MP_ALLOC_ANON)
3539                                rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3540                                                     NULL);
3541                }
3542        }
3543#endif
3544        if (ports != NULL) {
3545                no_link_check = 1;
3546                RTE_ETH_FOREACH_DEV(pt_id) {
3547                        printf("\nStopping port %d...\n", pt_id);
3548                        fflush(stdout);
3549                        stop_port(pt_id);
3550                }
3551                RTE_ETH_FOREACH_DEV(pt_id) {
3552                        printf("\nShutting down port %d...\n", pt_id);
3553                        fflush(stdout);
3554                        close_port(pt_id);
3555                }
3556        }
3557
3558        if (hot_plug) {
3559                ret = rte_dev_event_monitor_stop();
3560                if (ret) {
3561                        RTE_LOG(ERR, EAL,
3562                                "Failed to stop the device event monitor\n");
3563                        return;
3564                }
3565
3566                ret = rte_dev_event_callback_unregister(NULL,
3567                        dev_event_callback, NULL);
3568                if (ret < 0) {
3569                        RTE_LOG(ERR, EAL,
3570                                "Failed to unregister the device event callback\n");
3571                        return;
3572                }
3573
3574                ret = rte_dev_hotplug_handle_disable();
3575                if (ret) {
3576                        RTE_LOG(ERR, EAL,
3577                                "Failed to disable hotplug handling\n");
3578                        return;
3579                }
3580        }
3581        for (i = 0; i < RTE_DIM(mempools); i++) {
3582                if (mempools[i])
3583                        mempool_free_mp(mempools[i]);
3584        }
3585        free(xstats_display);
3586
3587        printf("\nBye...\n");
3588}
3589
3590typedef void (*cmd_func_t)(void);
3591struct pmd_test_command {
3592        const char *cmd_name;
3593        cmd_func_t cmd_func;
3594};
3595
3596/* Check the link status of all ports for up to 9 s, and print the final status */
3597static void
3598check_all_ports_link_status(uint32_t port_mask)
3599{
3600#define CHECK_INTERVAL 100 /* 100ms */
3601#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3602        portid_t portid;
3603        uint8_t count, all_ports_up, print_flag = 0;
3604        struct rte_eth_link link;
3605        int ret;
3606        char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3607
3608        printf("Checking link statuses...\n");
3609        fflush(stdout);
3610        for (count = 0; count <= MAX_CHECK_TIME; count++) {
3611                all_ports_up = 1;
3612                RTE_ETH_FOREACH_DEV(portid) {
3613                        if ((port_mask & (1 << portid)) == 0)
3614                                continue;
3615                        memset(&link, 0, sizeof(link));
3616                        ret = rte_eth_link_get_nowait(portid, &link);
3617                        if (ret < 0) {
3618                                all_ports_up = 0;
3619                                if (print_flag == 1)
3620                                        fprintf(stderr,
3621                                                "Port %u link get failed: %s\n",
3622                                                portid, rte_strerror(-ret));
3623                                continue;
3624                        }
3625                        /* print link status if flag set */
3626                        if (print_flag == 1) {
3627                                rte_eth_link_to_str(link_status,
3628                                        sizeof(link_status), &link);
3629                                printf("Port %d %s\n", portid, link_status);
3630                                continue;
3631                        }
3632                        /* clear all_ports_up flag if any link down */
3633                        if (link.link_status == RTE_ETH_LINK_DOWN) {
3634                                all_ports_up = 0;
3635                                break;
3636                        }
3637                }
3638                /* After the final link status has been printed, get out */
3639                if (print_flag == 1)
3640                        break;
3641
3642                if (all_ports_up == 0) {
3643                        fflush(stdout);
3644                        rte_delay_ms(CHECK_INTERVAL);
3645                }
3646
3647                /* set the print_flag if all ports up or timeout */
3648                if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3649                        print_flag = 1;
3650                }
3651
3652                if (lsc_interrupt)
3653                        break;
3654        }
3655}
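
/*
 * Editor's illustrative sketch, not part of testpmd: a one-shot version of
 * the polling loop above for a single port, using the same non-blocking
 * query and string helper.
 */
static __rte_unused void
example_print_link_once(uint16_t port_id)
{
        struct rte_eth_link link;
        char buf[RTE_ETH_LINK_MAX_STR_LEN];

        memset(&link, 0, sizeof(link));
        if (rte_eth_link_get_nowait(port_id, &link) < 0)
                return;
        rte_eth_link_to_str(buf, sizeof(buf), &link);
        printf("Port %u %s\n", port_id, buf);
}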
3656
3657static void
3658rmv_port_callback(void *arg)
3659{
3660        int need_to_start = 0;
3661        int org_no_link_check = no_link_check;
3662        portid_t port_id = (intptr_t)arg;
3663        struct rte_eth_dev_info dev_info;
3664        int ret;
3665
3666        RTE_ETH_VALID_PORTID_OR_RET(port_id);
3667
3668        if (!test_done && port_is_forwarding(port_id)) {
3669                need_to_start = 1;
3670                stop_packet_forwarding();
3671        }
3672        no_link_check = 1;
3673        stop_port(port_id);
3674        no_link_check = org_no_link_check;
3675
3676        ret = eth_dev_info_get_print_err(port_id, &dev_info);
3677        if (ret != 0)
3678                TESTPMD_LOG(ERR,
3679                        "Failed to get device info for port %d, not detaching\n",
3680                        port_id);
3681        else {
3682                struct rte_device *device = dev_info.device;
3683                close_port(port_id);
3684                detach_device(device); /* might be already removed or have more ports */
3685        }
3686        if (need_to_start)
3687                start_packet_forwarding(0);
3688}
3689
3690/* This function is used by the interrupt thread */
3691static int
3692eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3693                  void *ret_param)
3694{
3695        RTE_SET_USED(param);
3696        RTE_SET_USED(ret_param);
3697
3698        if (type >= RTE_ETH_EVENT_MAX) {
3699                fprintf(stderr,
3700                        "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3701                        port_id, __func__, type);
3702                fflush(stderr);
3703        } else if (event_print_mask & (UINT32_C(1) << type)) {
3704                printf("\nPort %" PRIu16 ": %s event\n", port_id,
3705                        eth_event_desc[type]);
3706                fflush(stdout);
3707        }
3708
3709        switch (type) {
3710        case RTE_ETH_EVENT_NEW:
3711                ports[port_id].need_setup = 1;
3712                ports[port_id].port_status = RTE_PORT_HANDLING;
3713                break;
3714        case RTE_ETH_EVENT_INTR_RMV:
3715                if (port_id_is_invalid(port_id, DISABLED_WARN))
3716                        break;
3717                if (rte_eal_alarm_set(100000,
3718                                rmv_port_callback, (void *)(intptr_t)port_id))
3719                        fprintf(stderr,
3720                                "Could not set up deferred device removal\n");
3721                break;
3722        case RTE_ETH_EVENT_DESTROY:
3723                ports[port_id].port_status = RTE_PORT_CLOSED;
3724                printf("Port %u is closed\n", port_id);
3725                break;
3726        case RTE_ETH_EVENT_RX_AVAIL_THRESH: {
3727                uint16_t rxq_id;
3728                int ret;
3729
3730                /* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */
3731                for (rxq_id = 0; ; rxq_id++) {
3732                        ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id,
3733                                                            NULL);
3734                        if (ret <= 0)
3735                                break;
3736                        printf("Received avail_thresh event, port: %u, rxq_id: %u\n",
3737                               port_id, rxq_id);
3738
3739#ifdef RTE_NET_MLX5
3740                        mlx5_test_avail_thresh_event_handler(port_id, rxq_id);
3741#endif
3742                }
3743                break;
3744        }
3745        default:
3746                break;
3747        }
3748        return 0;
3749}
3750
3751static int
3752register_eth_event_callback(void)
3753{
3754        int ret;
3755        enum rte_eth_event_type event;
3756
3757        for (event = RTE_ETH_EVENT_UNKNOWN;
3758                        event < RTE_ETH_EVENT_MAX; event++) {
3759                ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3760                                event,
3761                                eth_event_callback,
3762                                NULL);
3763                if (ret != 0) {
3764                        TESTPMD_LOG(ERR, "Failed to register callback for "
3765                                        "%s event\n", eth_event_desc[event]);
3766                        return -1;
3767                }
3768        }
3769
3770        return 0;
3771}
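
/*
 * Editor's illustrative sketch, not part of testpmd: the matching teardown
 * for the registration loop above. Unregistering takes the same
 * (event, callback, argument) triple that was registered.
 */
static __rte_unused void
example_unregister_eth_event_callback(void)
{
        enum rte_eth_event_type event;

        for (event = RTE_ETH_EVENT_UNKNOWN;
                        event < RTE_ETH_EVENT_MAX; event++)
                rte_eth_dev_callback_unregister(RTE_ETH_ALL, event,
                                eth_event_callback, NULL);
}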
3772
3773/* This function is used by the interrupt thread */
3774static void
3775dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3776                             __rte_unused void *arg)
3777{
3778        uint16_t port_id;
3779        int ret;
3780
3781        if (type >= RTE_DEV_EVENT_MAX) {
3782                fprintf(stderr, "%s called upon invalid event %d\n",
3783                        __func__, type);
3784                fflush(stderr);
3785        }
3786
3787        switch (type) {
3788        case RTE_DEV_EVENT_REMOVE:
3789                RTE_LOG(DEBUG, EAL, "The device %s has been removed\n",
3790                        device_name);
3791                ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3792                if (ret) {
3793                        RTE_LOG(ERR, EAL, "Cannot get port by device name %s\n",
3794                                device_name);
3795                        return;
3796                }
3797                /*
3798                 * The user's callback is invoked from the EAL interrupt
3799                 * callback, and an interrupt callback cannot be
3800                 * unregistered while it is still running, so the device
3801                 * cannot be detached directly from here. Instead, finish
3802                 * this callback quickly and defer the removal via an
3803                 * alarm. This workaround can be dropped once device
3804                 * detaching is moved into the EAL.
3805                 */
3806                if (rte_eal_alarm_set(100000,
3807                                rmv_port_callback, (void *)(intptr_t)port_id))
3808                        RTE_LOG(ERR, EAL,
3809                                "Could not set up deferred device removal\n");
3810                break;
3811        case RTE_DEV_EVENT_ADD:
3812                RTE_LOG(DEBUG, EAL, "The device %s has been added\n",
3813                        device_name);
3814                /* TODO: After the kernel driver binding finishes,
3815                 * begin to attach the port.
3816                 */
3817                break;
3818        default:
3819                break;
3820        }
3821}
3822
3823static void
3824rxtx_port_config(portid_t pid)
3825{
3826        uint16_t qid;
3827        uint64_t offloads;
3828        struct rte_port *port = &ports[pid];
3829
3830        for (qid = 0; qid < nb_rxq; qid++) {
3831                offloads = port->rxq[qid].conf.offloads;
3832                port->rxq[qid].conf = port->dev_info.default_rxconf;
3833
3834                if (rxq_share > 0 &&
3835                    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3836                        /* Non-zero share group to enable RxQ share. */
3837                        port->rxq[qid].conf.share_group = pid / rxq_share + 1;
3838                        port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
3839                }
3840
3841                if (offloads != 0)
3842                        port->rxq[qid].conf.offloads = offloads;
3843
3844                /* Check if any Rx parameters have been passed */
3845                if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3846                        port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;
3847
3848                if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3849                        port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;
3850
3851                if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3852                        port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;
3853
3854                if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3855                        port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;
3856
3857                if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3858                        port->rxq[qid].conf.rx_drop_en = rx_drop_en;
3859
3860                port->nb_rx_desc[qid] = nb_rxd;
3861        }
3862
3863        for (qid = 0; qid < nb_txq; qid++) {
3864                offloads = port->txq[qid].conf.offloads;
3865                port->txq[qid].conf = port->dev_info.default_txconf;
3866                if (offloads != 0)
3867                        port->txq[qid].conf.offloads = offloads;
3868
3869                /* Check if any Tx parameters have been passed */
3870                if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3871                        port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;
3872
3873                if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3874                        port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;
3875
3876                if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3877                        port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;
3878
3879                if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3880                        port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;
3881
3882                if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3883                        port->txq[qid].conf.tx_free_thresh = tx_free_thresh;
3884
3885                port->nb_tx_desc[qid] = nb_txd;
3886        }
3887}
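
/*
 * Editor's illustrative sketch, not part of testpmd: the
 * "driver default plus per-queue override" pattern used above, shown for
 * a single Tx queue with deferred start. The queue then stays stopped
 * after rte_eth_dev_start() until rte_eth_dev_tx_queue_start() is called.
 */
static __rte_unused int
example_deferred_start_txq(uint16_t port_id, uint16_t queue_id,
                           uint16_t nb_desc)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;
        txconf = dev_info.default_txconf;
        txconf.tx_deferred_start = 1;
        return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
                                      rte_eth_dev_socket_id(port_id),
                                      &txconf);
}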
3888
3889/*
3890 * Helper function to set MTU from frame size
3891 *
3892 * port->dev_info should be set before calling this function.
3893 *
3894 * Returns 0 on success, negative on error.
3895 */
3896int
3897update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
3898{
3899        struct rte_port *port = &ports[portid];
3900        uint32_t eth_overhead;
3901        uint16_t mtu, new_mtu;
3902
3903        eth_overhead = get_eth_overhead(&port->dev_info);
3904
3905        if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
3906                fprintf(stderr, "Failed to get MTU for port %u\n", portid);
3907                return -1;
3908        }
3909
3910        new_mtu = max_rx_pktlen - eth_overhead;
3911
3912        if (mtu == new_mtu)
3913                return 0;
3914
3915        if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
3916                fprintf(stderr,
3917                        "Failed to set MTU to %u for port %u\n",
3918                        new_mtu, portid);
3919                return -1;
3920        }
3921
3922        port->dev_conf.rxmode.mtu = new_mtu;
3923
3924        return 0;
3925}
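
/*
 * Worked example (editor's addition): with the classic Ethernet overhead
 * of RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes, a maximum
 * Rx packet length of 1518 yields an MTU of 1518 - 18 = 1500
 * (RTE_ETHER_MTU). Devices with extra overhead (e.g. VLAN tags) report it
 * via dev_info, which is what get_eth_overhead() accounts for above.
 */
static __rte_unused uint16_t
example_mtu_from_frame_size(uint32_t max_rx_pktlen)
{
        /* E.g. 1518 - (14 + 4) = 1500 == RTE_ETHER_MTU. */
        return (uint16_t)(max_rx_pktlen -
                          (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN));
}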
3926
3927void
3928init_port_config(void)
3929{
3930        portid_t pid;
3931        struct rte_port *port;
3932        int ret, i;
3933
3934        RTE_ETH_FOREACH_DEV(pid) {
3935                port = &ports[pid];
3936                port->dev_conf.fdir_conf = fdir_conf;
3937
3938                ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3939                if (ret != 0)
3940                        return;
3941
3942                if (nb_rxq > 1) {
3943                        port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3944                        port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3945                                rss_hf & port->dev_info.flow_type_rss_offloads;
3946                } else {
3947                        port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3948                        port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3949                }
3950
3951                if (port->dcb_flag == 0) {
3952                        if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3953                                port->dev_conf.rxmode.mq_mode =
3954                                        (enum rte_eth_rx_mq_mode)
3955                                                (rx_mq_mode & RTE_ETH_MQ_RX_RSS);
3956                        } else {
3957                                port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
3958                                port->dev_conf.rxmode.offloads &=
3959                                                ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3960
3961                                for (i = 0;
3962                                     i < port->dev_info.nb_rx_queues;
3963                                     i++)
3964                                        port->rxq[i].conf.offloads &=
3965                                                ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3966                        }
3967                }
3968
3969                rxtx_port_config(pid);
3970
3971                ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3972                if (ret != 0)
3973                        return;
3974
3975#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3976                rte_pmd_ixgbe_bypass_init(pid);
3977#endif
3978
3979                if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
3980                        port->dev_conf.intr_conf.lsc = 1;
3981                if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3982                        port->dev_conf.intr_conf.rmv = 1;
3983        }
3984}
3985
3986void set_port_slave_flag(portid_t slave_pid)
3987{
3988        struct rte_port *port;
3989
3990        port = &ports[slave_pid];
3991        port->slave_flag = 1;
3992}
3993
3994void clear_port_slave_flag(portid_t slave_pid)
3995{
3996        struct rte_port *port;
3997
3998        port = &ports[slave_pid];
3999        port->slave_flag = 0;
4000}
4001
4002uint8_t port_is_bonding_slave(portid_t slave_pid)
4003{
4004        struct rte_port *port;
4005        struct rte_eth_dev_info dev_info;
4006        int ret;
4007
4008        port = &ports[slave_pid];
4009        ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
4010        if (ret != 0) {
4011                TESTPMD_LOG(ERR,
4012                        "Failed to get device info for port id %d, "
4013                        "cannot determine if the port is a bonded slave\n",
4014                        slave_pid);
4015                return 0;
4016        }
4017        if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
4018                return 1;
4019        return 0;
4020}
4021
4022const uint16_t vlan_tags[] = {
4023                0,  1,  2,  3,  4,  5,  6,  7,
4024                8,  9, 10, 11,  12, 13, 14, 15,
4025                16, 17, 18, 19, 20, 21, 22, 23,
4026                24, 25, 26, 27, 28, 29, 30, 31
4027};
4028
4029static int
4030get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
4031                 enum dcb_mode_enable dcb_mode,
4032                 enum rte_eth_nb_tcs num_tcs,
4033                 uint8_t pfc_en)
4034{
4035        uint8_t i;
4036        int32_t rc;
4037        struct rte_eth_rss_conf rss_conf;
4038
4039        /*
4040         * Builds up the correct configuration for DCB+VT based on the VLAN tags array
4041         * given above, and the number of traffic classes available for use.
4042         */
4043        if (dcb_mode == DCB_VT_ENABLED) {
4044                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
4045                                &eth_conf->rx_adv_conf.vmdq_dcb_conf;
4046                struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
4047                                &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
4048
4049                /* VMDQ+DCB RX and TX configurations */
4050                vmdq_rx_conf->enable_default_pool = 0;
4051                vmdq_rx_conf->default_pool = 0;
4052                vmdq_rx_conf->nb_queue_pools =
4053                        (num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
4054                vmdq_tx_conf->nb_queue_pools =
4055                        (num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
4056
4057                vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
4058                for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
4059                        vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
4060                        vmdq_rx_conf->pool_map[i].pools =
4061                                1 << (i % vmdq_rx_conf->nb_queue_pools);
4062                }
4063                for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4064                        vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
4065                        vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
4066                }
4067
4068                /* set DCB mode of RX and TX of multiple queues */
4069                eth_conf->rxmode.mq_mode =
4070                                (enum rte_eth_rx_mq_mode)
4071                                        (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
4072                eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
4073        } else {
4074                struct rte_eth_dcb_rx_conf *rx_conf =
4075                                &eth_conf->rx_adv_conf.dcb_rx_conf;
4076                struct rte_eth_dcb_tx_conf *tx_conf =
4077                                &eth_conf->tx_adv_conf.dcb_tx_conf;
4078
4079                memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
4080
4081                rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
4082                if (rc != 0)
4083                        return rc;
4084
4085                rx_conf->nb_tcs = num_tcs;
4086                tx_conf->nb_tcs = num_tcs;
4087
4088                for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4089                        rx_conf->dcb_tc[i] = i % num_tcs;
4090                        tx_conf->dcb_tc[i] = i % num_tcs;
4091                }
4092
4093                eth_conf->rxmode.mq_mode =
4094                                (enum rte_eth_rx_mq_mode)
4095                                        (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
4096                eth_conf->rx_adv_conf.rss_conf = rss_conf;
4097                eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
4098        }
4099
4100        if (pfc_en)
4101                eth_conf->dcb_capability_en =
4102                                RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
4103        else
4104                eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
4105
4106        return 0;
4107}
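
/*
 * Worked example (editor's addition): the "i % num_tcs" mapping above
 * spreads the 8 user priorities round-robin over the traffic classes.
 * With num_tcs == RTE_ETH_4_TCS, priorities 0..7 map to TCs
 * 0,1,2,3,0,1,2,3; with RTE_ETH_8_TCS the mapping is the identity.
 */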
4108
4109int
4110init_port_dcb_config(portid_t pid,
4111                     enum dcb_mode_enable dcb_mode,
4112                     enum rte_eth_nb_tcs num_tcs,
4113                     uint8_t pfc_en)
4114{
4115        struct rte_eth_conf port_conf;
4116        struct rte_port *rte_port;
4117        int retval;
4118        uint16_t i;
4119
4120        if (num_procs > 1) {
4121                fprintf(stderr, "The multi-process feature doesn't support DCB.\n");
4122                return -ENOTSUP;
4123        }
4124        rte_port = &ports[pid];
4125
4126        /* retain the original device configuration. */
4127        memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
4128
4129        /* Set configuration of DCB in VT mode and DCB in non-VT mode */
4130        retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
4131        if (retval < 0)
4132                return retval;
4133        port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4134        /* remove RSS HASH offload for DCB in vt mode */
4135        if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
4136                port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4137                for (i = 0; i < nb_rxq; i++)
4138                        rte_port->rxq[i].conf.offloads &=
4139                                ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4140        }
4141
4142        /* Re-configure the device. */
4143        retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
4144        if (retval < 0)
4145                return retval;
4146
4147        retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
4148        if (retval != 0)
4149                return retval;
4150
4151        /* If dev_info.vmdq_pool_base is greater than 0,
4152         * the queue IDs of the VMDq pools start after the PF queues.
4153         */
4154        if (dcb_mode == DCB_VT_ENABLED &&
4155            rte_port->dev_info.vmdq_pool_base > 0) {
4156                fprintf(stderr,
4157                        "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
4158                        pid);
4159                return -1;
4160        }
4161
4162        /* Assume the ports in testpmd have the same DCB capability
4163         * and the same number of Rx and Tx queues in DCB mode.
4164         */
4165        if (dcb_mode == DCB_VT_ENABLED) {
4166                if (rte_port->dev_info.max_vfs > 0) {
4167                        nb_rxq = rte_port->dev_info.nb_rx_queues;
4168                        nb_txq = rte_port->dev_info.nb_tx_queues;
4169                } else {
4170                        nb_rxq = rte_port->dev_info.max_rx_queues;
4171                        nb_txq = rte_port->dev_info.max_tx_queues;
4172                }
4173        } else {
4174                /* If VT is disabled, use all PF queues */
4175                if (rte_port->dev_info.vmdq_pool_base == 0) {
4176                        nb_rxq = rte_port->dev_info.max_rx_queues;
4177                        nb_txq = rte_port->dev_info.max_tx_queues;
4178                } else {
4179                        nb_rxq = (queueid_t)num_tcs;
4180                        nb_txq = (queueid_t)num_tcs;
4181
4182                }
4183        }
4184        rx_free_thresh = 64;
4185
4186        memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4187
4188        rxtx_port_config(pid);
4189        /* VLAN filter */
4190        rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4191        for (i = 0; i < RTE_DIM(vlan_tags); i++)
4192                rx_vft_set(pid, vlan_tags[i], 1);
4193
4194        retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4195        if (retval != 0)
4196                return retval;
4197
4198        rte_port->dcb_flag = 1;
4199
4200        /* Enter DCB configuration status */
4201        dcb_config = 1;
4202
4203        return 0;
4204}
4205
4206static void
4207init_port(void)
4208{
4209        int i;
4210
4211        /* Configuration of Ethernet ports. */
4212        ports = rte_zmalloc("testpmd: ports",
4213                            sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4214                            RTE_CACHE_LINE_SIZE);
4215        if (ports == NULL) {
4216                rte_exit(EXIT_FAILURE,
4217                                "rte_zmalloc(%d struct rte_port) failed\n",
4218                                RTE_MAX_ETHPORTS);
4219        }
4220        for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4221                ports[i].xstats_info.allocated = false;
4222        for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4223                LIST_INIT(&ports[i].flow_tunnel_list);
4224        /* Initialize ports NUMA structures */
4225        memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4226        memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4227        memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4228}
4229
4230static void
4231force_quit(void)
4232{
4233        pmd_test_exit();
4234        prompt_exit();
4235}
4236
4237static void
4238print_stats(void)
4239{
4240        uint8_t i;
4241        const char clr[] = { 27, '[', '2', 'J', '\0' };
4242        const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4243
4244        /* Clear screen and move to top left */
4245        printf("%s%s", clr, top_left);
4246
4247        printf("\nPort statistics ====================================");
4248        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4249                nic_stats_display(fwd_ports_ids[i]);
4250
4251        fflush(stdout);
4252}
4253
4254static void
4255signal_handler(int signum)
4256{
4257        if (signum == SIGINT || signum == SIGTERM) {
4258                fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4259                        signum);
4260#ifdef RTE_LIB_PDUMP
4261                /* uninitialize packet capture framework */
4262                rte_pdump_uninit();
4263#endif
4264#ifdef RTE_LIB_LATENCYSTATS
4265                if (latencystats_enabled != 0)
4266                        rte_latencystats_uninit();
4267#endif
4268                force_quit();
4269                /* Set flag to indicate forced termination. */
4270                f_quit = 1;
4271                /* exit with the expected status */
4272#ifndef RTE_EXEC_ENV_WINDOWS
4273                signal(signum, SIG_DFL);
4274                kill(getpid(), signum);
4275#endif
4276        }
4277}
4278
4279int
4280main(int argc, char** argv)
4281{
4282        int diag;
4283        portid_t port_id;
4284        uint16_t count;
4285        int ret;
4286
4287        signal(SIGINT, signal_handler);
4288        signal(SIGTERM, signal_handler);
4289
4290        testpmd_logtype = rte_log_register("testpmd");
4291        if (testpmd_logtype < 0)
4292                rte_exit(EXIT_FAILURE, "Cannot register log type\n");
4293        rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4294
4295        diag = rte_eal_init(argc, argv);
4296        if (diag < 0)
4297                rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
4298                         rte_strerror(rte_errno));
4299
4300        ret = register_eth_event_callback();
4301        if (ret != 0)
4302                rte_exit(EXIT_FAILURE, "Cannot register for ethdev events\n");
4303
4304#ifdef RTE_LIB_PDUMP
4305        /* initialize packet capture framework */
4306        rte_pdump_init();
4307#endif
4308
4309        count = 0;
4310        RTE_ETH_FOREACH_DEV(port_id) {
4311                ports_ids[count] = port_id;
4312                count++;
4313        }
4314        nb_ports = (portid_t) count;
4315        if (nb_ports == 0)
4316                TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
4317
4318        /* allocate port structures, and init them */
4319        init_port();
4320
4321        set_def_fwd_config();
4322        if (nb_lcores == 0)
4323                rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
4324                         "Check the core mask argument\n");
4325
4326        /* Bitrate/latency stats disabled by default */
4327#ifdef RTE_LIB_BITRATESTATS
4328        bitrate_enabled = 0;
4329#endif
4330#ifdef RTE_LIB_LATENCYSTATS
4331        latencystats_enabled = 0;
4332#endif
4333
4334        /* on FreeBSD, mlockall() is disabled by default */
4335#ifdef RTE_EXEC_ENV_FREEBSD
4336        do_mlockall = 0;
4337#else
4338        do_mlockall = 1;
4339#endif
4340
4341        argc -= diag;
4342        argv += diag;
4343        if (argc > 1)
4344                launch_args_parse(argc, argv);
4345
4346#ifndef RTE_EXEC_ENV_WINDOWS
4347        if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4348                TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
4349                        strerror(errno));
4350        }
4351#endif
4352
4353        if (tx_first && interactive)
4354                rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
4355                                "interactive mode.\n");
4356
4357        if (tx_first && lsc_interrupt) {
4358                fprintf(stderr,
4359                        "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
4360                lsc_interrupt = 0;
4361        }
4362
4363        if (!nb_rxq && !nb_txq)
4364                fprintf(stderr,
4365                        "Warning: Either Rx or Tx queue count should be non-zero\n");
4366
4367        if (nb_rxq > 1 && nb_rxq > nb_txq)
4368                fprintf(stderr,
4369                        "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d prevents fully testing it.\n",
4370                        nb_rxq, nb_txq);
4371
4372        init_config();
4373
4374        if (hot_plug) {
4375                ret = rte_dev_hotplug_handle_enable();
4376                if (ret) {
4377                        RTE_LOG(ERR, EAL,
4378                                "Failed to enable hotplug handling\n");
4379                        return -1;
4380                }
4381
4382                ret = rte_dev_event_monitor_start();
4383                if (ret) {
4384                        RTE_LOG(ERR, EAL,
4385                                "Failed to start device event monitoring\n");
4386                        return -1;
4387                }
4388
4389                ret = rte_dev_event_callback_register(NULL,
4390                        dev_event_callback, NULL);
4391                if (ret) {
4392                        RTE_LOG(ERR, EAL,
4393                                "fail  to register device event callback\n");
4394                        return -1;
4395                }
4396        }
4397
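        /* Start all probed ports unless device start is disabled. */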
        if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
                rte_exit(EXIT_FAILURE, "Failed to start ports\n");

        /* set all ports to promiscuous mode by default */
        RTE_ETH_FOREACH_DEV(port_id) {
                ret = rte_eth_promiscuous_enable(port_id);
                if (ret != 0)
                        fprintf(stderr,
                                "Error enabling promiscuous mode for port %u: %s - ignored\n",
                                port_id, rte_strerror(-ret));
        }

#ifdef RTE_LIB_METRICS
        /* Init metrics library */
        rte_metrics_init(rte_socket_id());
#endif

#ifdef RTE_LIB_LATENCYSTATS
        if (latencystats_enabled != 0) {
                int ret = rte_latencystats_init(1, NULL);
                if (ret)
                        fprintf(stderr,
                                "Warning: latencystats init() returned error %d\n",
                                ret);
                else
                        fprintf(stderr, "Latencystats running on lcore %d\n",
                                latencystats_lcore_id);
        }
#endif

        /* Setup bitrate stats */
#ifdef RTE_LIB_BITRATESTATS
        if (bitrate_enabled != 0) {
                bitrate_data = rte_stats_bitrate_create();
                if (bitrate_data == NULL)
                        rte_exit(EXIT_FAILURE,
                                "Could not allocate bitrate data.\n");
                rte_stats_bitrate_reg(bitrate_data);
        }
#endif
#ifdef RTE_LIB_CMDLINE
        if (init_cmdline() != 0)
                rte_exit(EXIT_FAILURE,
                        "Could not initialise cmdline context.\n");

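        /* Replay commands from the file given with --cmdline-file, if any. */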
        if (strlen(cmdline_filename) != 0)
                cmdline_read_from_file(cmdline_filename);

        if (interactive == 1) {
                if (auto_start) {
                        printf("Start automatic packet forwarding\n");
                        start_packet_forwarding(0);
                }
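                /* Run the interactive command line until the user quits. */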
                prompt();
                pmd_test_exit();
        } else
#endif
        {
                char c;
                int rc;

                f_quit = 0;

                printf("No command line requested, starting packet forwarding\n");
                start_packet_forwarding(tx_first);
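                /*
                 * When a stats period is configured, print port statistics
                 * every stats_period seconds until a signal sets f_quit.
                 */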
                if (stats_period != 0) {
                        uint64_t prev_time = 0, cur_time, diff_time = 0;
                        uint64_t timer_period;

                        /* Convert to number of cycles */
                        timer_period = stats_period * rte_get_timer_hz();

                        while (f_quit == 0) {
                                cur_time = rte_get_timer_cycles();
                                diff_time += cur_time - prev_time;

                                if (diff_time >= timer_period) {
                                        print_stats();
                                        /* Reset the timer */
                                        diff_time = 0;
                                }
                                /* Sleep to avoid unnecessary checks */
                                prev_time = cur_time;
                                rte_delay_us_sleep(US_PER_S);
                        }
                }

                printf("Press enter to exit\n");
                rc = read(0, &c, 1);
                pmd_test_exit();
                if (rc < 0)
                        return 1;
        }

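        /* Release EAL resources (memory, devices, ...) before exiting. */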
        ret = rte_eal_cleanup();
        if (ret != 0)
                rte_exit(EXIT_FAILURE,
                         "EAL cleanup failed: %s\n", strerror(-ret));

        return EXIT_SUCCESS;
}