dpdk/app/test/test_pmd_perf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */


#include <stdio.h>
#include <inttypes.h>
#include <signal.h>
#include <unistd.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include "packet_burst_generator.h"
#include "test.h"

#define NB_ETHPORTS_USED                (1)
#define NB_SOCKETS                      (2)
#define MEMPOOL_CACHE_SIZE 250
#define MAX_PKT_BURST                   (32)
#define RTE_TEST_RX_DESC_DEFAULT        (1024)
#define RTE_TEST_TX_DESC_DEFAULT        (1024)
#define RTE_PORT_ALL            (~(uint16_t)0x0)

/* how long the test would take at full line rate */
#define RTE_TEST_DURATION                (2)

/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

#define MAX_TRAFFIC_BURST              2048

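/*
 * Size the mbuf pool to cover all RX and TX descriptors, one burst in
 * flight per lcore per port, the per-lcore mempool caches and the port
 * priming burst, with a floor of 8192 mbufs.
 */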
#define NB_MBUF RTE_MAX(                                                \
                (unsigned)(nb_ports*nb_rx_queue*nb_rxd +                \
                           nb_ports*nb_lcores*MAX_PKT_BURST +           \
                           nb_ports*nb_tx_queue*nb_txd +                \
                           nb_lcores*MEMPOOL_CACHE_SIZE +               \
                           nb_ports*MAX_TRAFFIC_BURST),                 \
                        (unsigned)8192)


static struct rte_mempool *mbufpool[NB_SOCKETS];
/* ethernet addresses of ports */
static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = RTE_ETH_MQ_RX_NONE,
                .split_hdr_size = 0,
        },
        .txmode = {
                .mq_mode = RTE_ETH_MQ_TX_NONE,
        },
        .lpbk_mode = 1,  /* enable loopback */
};

static struct rte_eth_rxconf rx_conf = {
        .rx_thresh = {
                .pthresh = RX_PTHRESH,
                .hthresh = RX_HTHRESH,
                .wthresh = RX_WTHRESH,
        },
        .rx_free_thresh = 32,
};

static struct rte_eth_txconf tx_conf = {
        .tx_thresh = {
                .pthresh = TX_PTHRESH,
                .hthresh = TX_HTHRESH,
                .wthresh = TX_WTHRESH,
        },
        .tx_free_thresh = 32, /* Use PMD default values */
        .tx_rs_thresh = 32, /* Use PMD default values */
};

enum {
        LCORE_INVALID = 0,
        LCORE_AVAIL,
        LCORE_USED,
};

struct lcore_conf {
        uint8_t status;
        uint8_t socketid;
        uint16_t nb_ports;
        uint16_t portlist[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;

struct lcore_conf lcore_conf[RTE_MAX_LCORE];

static uint64_t link_mbps;

enum {
        SC_CONTINUOUS = 0,
        SC_BURST_POLL_FIRST,
        SC_BURST_XMIT_FIRST,
};

static uint32_t sc_flag;

/* Check the link status of all ports for up to 3s, and finally print it */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
        uint16_t portid;
        uint8_t count, all_ports_up, print_flag = 0;
        struct rte_eth_link link;
        int ret;
        char link_status[RTE_ETH_LINK_MAX_STR_LEN];

        printf("Checking link statuses...\n");
        fflush(stdout);
        for (count = 0; count <= MAX_CHECK_TIME; count++) {
                all_ports_up = 1;
                for (portid = 0; portid < port_num; portid++) {
                        if ((port_mask & (1 << portid)) == 0)
                                continue;
                        memset(&link, 0, sizeof(link));
                        ret = rte_eth_link_get_nowait(portid, &link);
                        if (ret < 0) {
                                all_ports_up = 0;
                                if (print_flag == 1)
                                        printf("Port %u link get failed: %s\n",
                                                portid, rte_strerror(-ret));
                                continue;
                        }

                        /* print link status if flag set */
                        if (print_flag == 1) {
                                if (link.link_status && link_mbps == 0)
                                        link_mbps = link.link_speed;

                                rte_eth_link_to_str(link_status,
                                        sizeof(link_status), &link);
                                printf("Port %d %s\n", portid, link_status);
                                continue;
                        }
                        /* clear all_ports_up flag if any link down */
                        if (link.link_status == RTE_ETH_LINK_DOWN) {
                                all_ports_up = 0;
                                break;
                        }
                }
                /* after finally printing all link status, get out */
                if (print_flag == 1)
                        break;

                if (all_ports_up == 0) {
                        fflush(stdout);
                        rte_delay_ms(CHECK_INTERVAL);
                }

                /* set the print_flag if all ports up or timeout */
                if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
                        print_flag = 1;
        }
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
        char buf[RTE_ETHER_ADDR_FMT_SIZE];
        rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
        printf("%s%s", name, buf);
}

static int
init_traffic(struct rte_mempool *mp,
             struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
        struct rte_ether_hdr pkt_eth_hdr;
        struct rte_ipv4_hdr pkt_ipv4_hdr;
        struct rte_udp_hdr pkt_udp_hdr;
        uint32_t pktlen;
        static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
        static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };


        initialize_eth_header(&pkt_eth_hdr,
                (struct rte_ether_addr *)src_mac,
                (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPV4, 0, 0);

        pktlen = initialize_ipv4_header(&pkt_ipv4_hdr,
                                        IPV4_ADDR(10, 0, 0, 1),
                                        IPV4_ADDR(10, 0, 0, 2), 26);
        printf("IPv4 pktlen %u\n", pktlen);

        pktlen = initialize_udp_header(&pkt_udp_hdr, 0, 0, 18);

        printf("UDP pktlen %u\n", pktlen);

        return generate_packet_burst(mp, pkts_burst, &pkt_eth_hdr,
                                     0, &pkt_ipv4_hdr, 1,
                                     &pkt_udp_hdr, burst_size,
                                     PACKET_BURST_GEN_PKT_LEN, 1);
}

static int
init_lcores(void)
{
        unsigned lcore_id;

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                lcore_conf[lcore_id].socketid =
                        rte_lcore_to_socket_id(lcore_id);
                if (rte_lcore_is_enabled(lcore_id) == 0) {
                        lcore_conf[lcore_id].status = LCORE_INVALID;
                        continue;
                } else
                        lcore_conf[lcore_id].status = LCORE_AVAIL;
        }
        return 0;
}

static int
init_mbufpool(unsigned nb_mbuf)
{
        int socketid;
        unsigned lcore_id;
        char s[64];

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                if (rte_lcore_is_enabled(lcore_id) == 0)
                        continue;

                socketid = rte_lcore_to_socket_id(lcore_id);
                if (socketid >= NB_SOCKETS) {
                        rte_exit(EXIT_FAILURE,
                                "Socket %d of lcore %u is out of range %d\n",
                                socketid, lcore_id, NB_SOCKETS);
                }
                if (mbufpool[socketid] == NULL) {
                        snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
                        mbufpool[socketid] =
                                rte_pktmbuf_pool_create(s, nb_mbuf,
                                        MEMPOOL_CACHE_SIZE, 0,
                                        RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
                        if (mbufpool[socketid] == NULL)
                                rte_exit(EXIT_FAILURE,
                                        "Cannot init mbuf pool on socket %d\n",
                                        socketid);
                        else
                                printf("Allocated mbuf pool on socket %d\n",
                                        socketid);
                }
        }
        return 0;
}

static uint16_t
alloc_lcore(uint16_t socketid)
{
        unsigned lcore_id;

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                if (LCORE_AVAIL != lcore_conf[lcore_id].status ||
                    lcore_conf[lcore_id].socketid != socketid ||
                    lcore_id == rte_get_main_lcore())
                        continue;
                lcore_conf[lcore_id].status = LCORE_USED;
                lcore_conf[lcore_id].nb_ports = 0;
                return lcore_id;
        }

        return (uint16_t)-1;
}

static volatile uint64_t stop;
static uint64_t count;
static uint64_t drop;
static uint64_t idle;

static void
reset_count(void)
{
        count = 0;
        drop = 0;
        idle = 0;
}

#ifndef RTE_EXEC_ENV_WINDOWS
static void
stats_display(uint16_t port_id)
{
        struct rte_eth_stats stats;
        rte_eth_stats_get(port_id, &stats);

        printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
               "%-"PRIu64"\n",
               stats.ipackets, stats.imissed, stats.ibytes);
        printf("  RX-errors: %-10"PRIu64" RX-nombuf:  %-10"PRIu64"\n",
               stats.ierrors, stats.rx_nombuf);
        printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
               "%-"PRIu64"\n",
               stats.opackets, stats.oerrors, stats.obytes);
}

static void
signal_handler(int signum)
{
        /* USR1 signal, stop testing */
        if (signum == SIGUSR1) {
                printf("Force Stop!\n");
                stop = 1;
        }

        /* USR2 signal, print stats */
        if (signum == SIGUSR2)
                stats_display(0);
}
#endif

struct rte_mbuf **tx_burst;

uint64_t (*do_measure)(struct lcore_conf *conf,
                       struct rte_mbuf *pkts_burst[],
                       uint64_t total_pkts);

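/*
 * RX+TX measurement: forward received packets back out of the same port and
 * time the whole receive/transmit loop with the TSC until total_pkts packets
 * have been seen; returns the elapsed cycles for the run.
 */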
static uint64_t
measure_rxtx(struct lcore_conf *conf,
             struct rte_mbuf *pkts_burst[],
             uint64_t total_pkts)
{
        unsigned i, portid, nb_rx, nb_tx;
        uint64_t prev_tsc, cur_tsc;

        prev_tsc = rte_rdtsc();

        while (likely(!stop)) {
                for (i = 0; i < conf->nb_ports; i++) {
                        portid = conf->portlist[i];
                        nb_rx = rte_eth_rx_burst(portid, 0,
                                                 pkts_burst, MAX_PKT_BURST);
                        if (unlikely(nb_rx == 0)) {
                                idle++;
                                continue;
                        }

                        count += nb_rx;
                        nb_tx = rte_eth_tx_burst(portid, 0, pkts_burst, nb_rx);
                        if (unlikely(nb_tx < nb_rx)) {
                                drop += (nb_rx - nb_tx);
                                do {
                                        rte_pktmbuf_free(pkts_burst[nb_tx]);
                                } while (++nb_tx < nb_rx);
                        }
                }
                if (unlikely(count >= total_pkts))
                        break;
        }

        cur_tsc = rte_rdtsc();

        return cur_tsc - prev_tsc;
}

static uint64_t
measure_rxonly(struct lcore_conf *conf,
               struct rte_mbuf *pkts_burst[],
               uint64_t total_pkts)
{
        unsigned i, portid, nb_rx, nb_tx;
        uint64_t diff_tsc, cur_tsc;

        diff_tsc = 0;
        while (likely(!stop)) {
                for (i = 0; i < conf->nb_ports; i++) {
                        portid = conf->portlist[i];

                        cur_tsc = rte_rdtsc();
                        nb_rx = rte_eth_rx_burst(portid, 0,
                                                 pkts_burst, MAX_PKT_BURST);
                        if (unlikely(nb_rx == 0)) {
                                idle++;
                                continue;
                        }
                        diff_tsc += rte_rdtsc() - cur_tsc;

                        count += nb_rx;
                        nb_tx = rte_eth_tx_burst(portid, 0, pkts_burst, nb_rx);
                        if (unlikely(nb_tx < nb_rx)) {
                                drop += (nb_rx - nb_tx);
                                do {
                                        rte_pktmbuf_free(pkts_burst[nb_tx]);
                                } while (++nb_tx < nb_rx);
                        }
                }
                if (unlikely(count >= total_pkts))
                        break;
        }

        return diff_tsc;
}

static uint64_t
measure_txonly(struct lcore_conf *conf,
               struct rte_mbuf *pkts_burst[],
               uint64_t total_pkts)
{
        unsigned i, portid, nb_rx, nb_tx;
        uint64_t diff_tsc, cur_tsc;

        printf("do tx measure\n");
        diff_tsc = 0;
        while (likely(!stop)) {
                for (i = 0; i < conf->nb_ports; i++) {
                        portid = conf->portlist[i];
                        nb_rx = rte_eth_rx_burst(portid, 0,
                                                 pkts_burst, MAX_PKT_BURST);
                        if (unlikely(nb_rx == 0)) {
                                idle++;
                                continue;
                        }

                        count += nb_rx;

                        cur_tsc = rte_rdtsc();
                        nb_tx = rte_eth_tx_burst(portid, 0, pkts_burst, nb_rx);
                        if (unlikely(nb_tx < nb_rx)) {
                                drop += (nb_rx - nb_tx);
                                do {
                                        rte_pktmbuf_free(pkts_burst[nb_tx]);
                                } while (++nb_tx < nb_rx);
                        }
                        diff_tsc += rte_rdtsc() - cur_tsc;
                }
                if (unlikely(count >= total_pkts))
                        break;
        }

        return diff_tsc;
}

/* main processing loop */
static int
main_loop(__rte_unused void *args)
{
#define PACKET_SIZE 64
#define FRAME_GAP 12
#define MAC_PREAMBLE 8
#define MAX_RETRY_COUNT 5
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        unsigned lcore_id;
        unsigned i, portid, nb_rx = 0, nb_tx = 0;
        struct lcore_conf *conf;
        int pkt_per_port;
        uint64_t diff_tsc;
        uint64_t packets_per_second, total_packets;
        int retry_cnt = 0;
        int free_pkt = 0;

        lcore_id = rte_lcore_id();
        conf = &lcore_conf[lcore_id];
        if (conf->status != LCORE_USED)
                return 0;

        pkt_per_port = MAX_TRAFFIC_BURST;

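        /* Prime each port with up to MAX_TRAFFIC_BURST packets so there is
         * traffic to loop back before the measurement starts.
         */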
        int idx = 0;
        for (i = 0; i < conf->nb_ports; i++) {
                int num = pkt_per_port;
                portid = conf->portlist[i];
                printf("inject %d packets to port %d\n", num, portid);
                while (num) {
                        nb_tx = RTE_MIN(MAX_PKT_BURST, num);
                        nb_tx = rte_eth_tx_burst(portid, 0,
                                                &tx_burst[idx], nb_tx);
                        if (nb_tx == 0)
                                retry_cnt++;
                        num -= nb_tx;
                        idx += nb_tx;
                        if (retry_cnt == MAX_RETRY_COUNT) {
                                retry_cnt = 0;
                                break;
                        }
                }
        }
        for (free_pkt = idx; free_pkt < (MAX_TRAFFIC_BURST * conf->nb_ports);
                        free_pkt++)
                rte_pktmbuf_free(tx_burst[free_pkt]);
        printf("Total packets injected to prime ports = %u\n", idx);

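        /*
         * Each 64-byte frame occupies PACKET_SIZE + FRAME_GAP + MAC_PREAMBLE
         * = 84 bytes (672 bits) on the wire, so line rate in packets per
         * second is link_mbps * 1e6 / 672.
         */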
        packets_per_second = (link_mbps * 1000 * 1000) /
                ((PACKET_SIZE + FRAME_GAP + MAC_PREAMBLE) * CHAR_BIT);
        printf("Each port will do %"PRIu64" packets per second\n",
               packets_per_second);

        total_packets = RTE_TEST_DURATION * conf->nb_ports * packets_per_second;
        printf("Test will stop after at least %"PRIu64" packets received\n",
                total_packets);

        diff_tsc = do_measure(conf, pkts_burst, total_packets);

        for (i = 0; i < conf->nb_ports; i++) {
                portid = conf->portlist[i];
                int nb_free = 0;
                uint64_t timeout = 10000;
                do { /* dry out */
                        nb_rx = rte_eth_rx_burst(portid, 0,
                                                 pkts_burst, MAX_PKT_BURST);
                        nb_tx = 0;
                        while (nb_tx < nb_rx)
                                rte_pktmbuf_free(pkts_burst[nb_tx++]);
                        nb_free += nb_rx;

                        if (unlikely(nb_rx == 0))
                                timeout--;
                } while (nb_free != pkt_per_port && timeout != 0);
                printf("freed %d (expected %d) mbufs left in port %u\n", nb_free,
                       pkt_per_port, portid);
        }

        if (count == 0)
                return -1;

        printf("%"PRIu64" packet, %"PRIu64" drop, %"PRIu64" idle\n",
               count, drop, idle);
        printf("Result: %"PRIu64" cycles per packet\n", diff_tsc / count);

        return 0;
}

static uint64_t start;

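/*
 * Burst-mode receiver: wait until exec_burst() signals 'start', then poll
 * every port until pkt_per_port packets per port have arrived or the idle
 * budget runs out, and return the average cycles spent per received packet.
 */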
static inline int
poll_burst(void *args)
{
#define MAX_IDLE           (10000)
        unsigned lcore_id;
        struct rte_mbuf **pkts_burst;
        uint64_t diff_tsc, cur_tsc;
        uint16_t next[RTE_MAX_ETHPORTS];
        struct lcore_conf *conf;
        uint32_t pkt_per_port = *((uint32_t *)args);
        unsigned i, portid, nb_rx = 0;
        uint64_t total;
        uint64_t timeout = MAX_IDLE;
        int num[RTE_MAX_ETHPORTS];

        lcore_id = rte_lcore_id();
        conf = &lcore_conf[lcore_id];
        if (conf->status != LCORE_USED)
                return 0;

        total = pkt_per_port * conf->nb_ports;
        printf("start to receive, total expected %"PRIu64"\n", total);

        pkts_burst = (struct rte_mbuf **)
                rte_calloc_socket("poll_burst",
                                  total, sizeof(void *),
                                  RTE_CACHE_LINE_SIZE, conf->socketid);
        if (!pkts_burst)
                return -1;

        for (i = 0; i < conf->nb_ports; i++) {
                portid = conf->portlist[i];
                next[portid] = i * pkt_per_port;
                num[portid] = pkt_per_port;
        }

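        /* Block here until exec_burst() releases us: immediately in
         * POLL_FIRST mode, only after transmission in XMIT_FIRST mode.
         */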
        rte_wait_until_equal_64(&start, 1, __ATOMIC_ACQUIRE);

        cur_tsc = rte_rdtsc();
        while (total) {
                for (i = 0; i < conf->nb_ports; i++) {
                        portid = conf->portlist[i];
                        nb_rx = rte_eth_rx_burst(portid, 0,
                                        &pkts_burst[next[portid]],
                                        RTE_MIN(MAX_PKT_BURST, num[portid]));
                        if (unlikely(nb_rx == 0)) {
                                timeout--;
                                if (unlikely(timeout == 0))
                                        goto timeout;
                                continue;
                        }
                        next[portid] += nb_rx;
                        num[portid] -= nb_rx;
                        total -= nb_rx;
                }
        }
timeout:
        diff_tsc = rte_rdtsc() - cur_tsc;

        printf("%"PRIu64" packets lost, IDLE %"PRIu64" times\n",
               total, MAX_IDLE - timeout);
        /* clean up */
        total = pkt_per_port * conf->nb_ports - total;
        for (i = 0; i < total; i++)
                rte_pktmbuf_free(pkts_burst[i]);

        rte_free(pkts_burst);

        if (total > 0)
                return diff_tsc / total;
        else
                return -1;
}

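/*
 * Burst test driver: launch poll_burst() on the worker lcore, transmit
 * MAX_TRAFFIC_BURST packets per port, and release the poller either before
 * (POLL_FIRST) or after (XMIT_FIRST) transmission, depending on 'flags'.
 */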
static int
exec_burst(uint32_t flags, int lcore)
{
        unsigned int portid, nb_tx = 0;
        struct lcore_conf *conf;
        uint32_t pkt_per_port;
        int num, i, idx = 0;
        int diff_tsc;

        conf = &lcore_conf[lcore];

        pkt_per_port = MAX_TRAFFIC_BURST;
        num = pkt_per_port * conf->nb_ports;

        /* only when polling first */
        if (flags == SC_BURST_POLL_FIRST)
                __atomic_store_n(&start, 1, __ATOMIC_RELAXED);
        else
                __atomic_store_n(&start, 0, __ATOMIC_RELAXED);

        /* Start the polling thread:
         * in POLL_FIRST mode it polls as soon as it is launched;
         * otherwise it does not actually poll yet.
         */
        rte_eal_remote_launch(poll_burst,
                              (void *)&pkt_per_port, lcore);

        /* start xmit */
        i = 0;
        while (num) {
                nb_tx = RTE_MIN(MAX_PKT_BURST, num);
                portid = conf->portlist[i];
                nb_tx = rte_eth_tx_burst(portid, 0, &tx_burst[idx], nb_tx);
                idx += nb_tx;
                num -= nb_tx;
                i = (i >= conf->nb_ports - 1) ? 0 : (i + 1);
        }

        rte_delay_us(5 * US_PER_S);

        /* only when polling second */
        if (flags == SC_BURST_XMIT_FIRST)
                __atomic_store_n(&start, 1, __ATOMIC_RELEASE);

        /* wait for polling finished */
        diff_tsc = rte_eal_wait_lcore(lcore);
        if (diff_tsc < 0) {
                printf("exec_burst: Failed to measure cycles per packet\n");
                return -1;
        }

        printf("Result: %d cycles per packet\n", diff_tsc);

        return 0;
}

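/*
 * Autotest entry point: configure and start every port on the first
 * detected socket in loopback mode, prime a traffic burst, then run either
 * the continuous (main_loop) or the burst (exec_burst) measurement.
 */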
static int
test_pmd_perf(void)
{
        uint16_t nb_ports, num, nb_lcores, worker_id = (uint16_t)-1;
        uint16_t nb_rxd = MAX_TRAFFIC_BURST;
        uint16_t nb_txd = MAX_TRAFFIC_BURST;
        uint16_t portid;
        uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
        int socketid = -1;
        int ret;

        printf("Start PMD RXTX cycles cost test.\n");

#ifndef RTE_EXEC_ENV_WINDOWS
        signal(SIGUSR1, signal_handler);
        signal(SIGUSR2, signal_handler);
#endif

        nb_ports = rte_eth_dev_count_avail();
        if (nb_ports < NB_ETHPORTS_USED) {
                printf("At least %u port(s) needed for the perf test\n",
                       NB_ETHPORTS_USED);
                return -1;
        }

        nb_lcores = rte_lcore_count();

        memset(lcore_conf, 0, sizeof(lcore_conf));
        init_lcores();

        init_mbufpool(NB_MBUF);

        if (sc_flag == SC_CONTINUOUS) {
                nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
                nb_txd = RTE_TEST_TX_DESC_DEFAULT;
        }
        printf("CONFIG RXD=%d TXD=%d\n", nb_rxd, nb_txd);

        reset_count();
        num = 0;
        RTE_ETH_FOREACH_DEV(portid) {
                if (socketid == -1) {
                        socketid = rte_eth_dev_socket_id(portid);
                        worker_id = alloc_lcore(socketid);
                        if (worker_id == (uint16_t)-1) {
                                printf("No avail lcore to run test\n");
                                return -1;
                        }
                        printf("Performance test runs on lcore %u socket %u\n",
                               worker_id, socketid);
                }

                if (socketid != rte_eth_dev_socket_id(portid)) {
                        printf("Skip port %d\n", portid);
                        continue;
                }

                /* port configure */
                ret = rte_eth_dev_configure(portid, nb_rx_queue,
                                            nb_tx_queue, &port_conf);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "Cannot configure device: err=%d, port=%d\n",
                                 ret, portid);

                ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "Cannot get mac address: err=%d, port=%d\n",
                                 ret, portid);

                printf("Port %u ", portid);
                print_ethaddr("Address:", &ports_eth_addr[portid]);
                printf("\n");

                /* tx queue setup */
                ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
                                             socketid, &tx_conf);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "rte_eth_tx_queue_setup: err=%d, "
                                "port=%d\n", ret, portid);

                /* rx queue setup */
                ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
                                                socketid, &rx_conf,
                                                mbufpool[socketid]);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d,"
                                 "port=%d\n", ret, portid);

                /* Start device */
                stop = 0;
                ret = rte_eth_dev_start(portid);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "rte_eth_dev_start: err=%d, port=%d\n",
                                ret, portid);

                /* always enable promiscuous */
                ret = rte_eth_promiscuous_enable(portid);
                if (ret != 0)
                        rte_exit(EXIT_FAILURE,
                                 "rte_eth_promiscuous_enable: err=%s, port=%d\n",
                                 rte_strerror(-ret), portid);

                lcore_conf[worker_id].portlist[num++] = portid;
                lcore_conf[worker_id].nb_ports++;
        }
        check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

        if (tx_burst == NULL) {
                tx_burst = (struct rte_mbuf **)
                        rte_calloc_socket("tx_buff",
                                          MAX_TRAFFIC_BURST * nb_ports,
                                          sizeof(void *),
                                          RTE_CACHE_LINE_SIZE, socketid);
                if (!tx_burst)
                        return -1;
        }

        init_traffic(mbufpool[socketid],
                     tx_burst, MAX_TRAFFIC_BURST * nb_ports);

        printf("Generate %d packets @socket %d\n",
               MAX_TRAFFIC_BURST * nb_ports, socketid);

        if (sc_flag == SC_CONTINUOUS) {
                /* do both rxtx by default */
                if (NULL == do_measure)
                        do_measure = measure_rxtx;

                rte_eal_remote_launch(main_loop, NULL, worker_id);

                if (rte_eal_wait_lcore(worker_id) < 0)
                        return -1;
        } else if (sc_flag == SC_BURST_POLL_FIRST ||
                   sc_flag == SC_BURST_XMIT_FIRST)
                if (exec_burst(sc_flag, worker_id) < 0)
                        return -1;

        /* port tear down */
        RTE_ETH_FOREACH_DEV(portid) {
                if (socketid != rte_eth_dev_socket_id(portid))
                        continue;

                ret = rte_eth_dev_stop(portid);
                if (ret != 0)
                        printf("rte_eth_dev_stop: err=%s, port=%u\n",
                               rte_strerror(-ret), portid);
        }

        return 0;
}

int
test_set_rxtx_conf(cmdline_fixed_string_t mode)
{
        printf("mode switch to %s\n", mode);

        if (!strcmp(mode, "vector")) {
                /* vector rx, tx */
                tx_conf.tx_rs_thresh = 32;
                tx_conf.tx_free_thresh = 32;
                return 0;
        } else if (!strcmp(mode, "scalar")) {
                /* bulk alloc rx, full-featured tx */
                tx_conf.tx_rs_thresh = 32;
                tx_conf.tx_free_thresh = 32;
                port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
                return 0;
        } else if (!strcmp(mode, "hybrid")) {
                /* bulk alloc rx, vector tx;
                 * when the vector macro is not defined,
                 * the same rx/tx paths as scalar are used
                 */
                tx_conf.tx_rs_thresh = 32;
                tx_conf.tx_free_thresh = 32;
                port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
                return 0;
        } else if (!strcmp(mode, "full")) {
                /* full-featured rx/tx pair */
                tx_conf.tx_rs_thresh = 32;
                tx_conf.tx_free_thresh = 32;
                port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
                return 0;
        }

        return -1;
}

int
test_set_rxtx_anchor(cmdline_fixed_string_t type)
{
        printf("type switch to %s\n", type);

        if (!strcmp(type, "rxtx")) {
                do_measure = measure_rxtx;
                return 0;
        } else if (!strcmp(type, "rxonly")) {
                do_measure = measure_rxonly;
                return 0;
        } else if (!strcmp(type, "txonly")) {
                do_measure = measure_txonly;
                return 0;
        }

        return -1;
}

int
test_set_rxtx_sc(cmdline_fixed_string_t type)
{
        printf("stream control switch to %s\n", type);

        if (!strcmp(type, "continuous")) {
                sc_flag = SC_CONTINUOUS;
                return 0;
        } else if (!strcmp(type, "poll_before_xmit")) {
                sc_flag = SC_BURST_POLL_FIRST;
                return 0;
        } else if (!strcmp(type, "poll_after_xmit")) {
                sc_flag = SC_BURST_XMIT_FIRST;
                return 0;
        }

        return -1;
}

REGISTER_TEST_COMMAND(pmd_perf_autotest, test_pmd_perf);