dpdk/app/test-pmd/flowgen.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2014-2020 Mellanox Technologies, Ltd
 */

#include <stdarg.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
#include <rte_flow.h>

#include "testpmd.h"

/* hardcoded configuration (for now) */
static unsigned cfg_n_flows     = 1024;
static uint32_t cfg_ip_src      = RTE_IPV4(10, 254, 0, 0);
static uint32_t cfg_ip_dst      = RTE_IPV4(10, 253, 0, 0);
static uint16_t cfg_udp_src     = 1000;
static uint16_t cfg_udp_dst     = 1001;
static struct rte_ether_addr cfg_ether_src =
        {{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x00 }};
static struct rte_ether_addr cfg_ether_dst =
        {{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x01 }};

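/*
 * Each flow is a distinct IPv4 destination address: cfg_ip_dst,
 * cfg_ip_dst + 1, ..., cfg_ip_dst + cfg_n_flows - 1.  The source
 * address and the UDP ports are the same for every flow.
 */
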
#define IP_DEFTTL  64   /* from RFC 1340. */

/* Use this type to inform GCC that ip_sum violates aliasing rules. */
typedef unaligned_uint16_t alias_int16_t __attribute__((__may_alias__));

/* Compute the RFC 1071 Internet checksum over hdr_len bytes. */
static inline uint16_t
ip_sum(const alias_int16_t *hdr, int hdr_len)
{
        uint32_t sum = 0;

        while (hdr_len > 1)
        {
                sum += *hdr++;
                /* Fold early to keep the 32-bit accumulator from overflowing. */
                if (sum & 0x80000000)
                        sum = (sum & 0xFFFF) + (sum >> 16);
                hdr_len -= 2;
        }

        /* Fold any remaining carries into the low 16 bits. */
        while (sum >> 16)
                sum = (sum & 0xFFFF) + (sum >> 16);

        return ~sum;
}
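
/*
 * Worked example (illustrative): summing the 16-bit words 0x4500 and
 * 0x0054 gives 0x4554, so the checksum would be ~0x4554 = 0xBAAB.
 * Per RFC 1071, the one's-complement sum is byte-order independent.
 */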

/*
 * Multi-flow generation mode.
 *
 * We originate a number of flows (varying the destination IP address) and
 * terminate the receive traffic.  Received packets are simply discarded,
 * but we still receive them in order to maintain traffic statistics.
 */
static void
pkt_burst_flow_gen(struct fwd_stream *fs)
{
        unsigned pkt_size = tx_pkt_length - 4;  /* adjust for the 4-byte Ethernet FCS */
        struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
        struct rte_mempool *mbp;
        struct rte_mbuf  *pkt = NULL;
        struct rte_ether_hdr *eth_hdr;
        struct rte_ipv4_hdr *ip_hdr;
        struct rte_udp_hdr *udp_hdr;
        uint16_t vlan_tci, vlan_tci_outer;
        uint64_t ol_flags = 0;
        uint16_t nb_rx;
        uint16_t nb_tx;
        uint16_t nb_pkt;
        uint16_t nb_clones = nb_pkt_flowgen_clones;
        uint16_t i;
        uint32_t retry;
        uint64_t tx_offloads;
        uint64_t start_tsc = 0;
        static int next_flow = 0;       /* shared by all streams running this engine */

        get_start_cycles(&start_tsc);

        /* Receive a burst of packets and discard them. */
        nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
                                 nb_pkt_per_burst);
        fs->rx_packets += nb_rx;

        for (i = 0; i < nb_rx; i++)
                rte_pktmbuf_free(pkts_burst[i]);

        mbp = current_fwd_lcore()->mbp;
        vlan_tci = ports[fs->tx_port].tx_vlan_id;
        vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;

        tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
        if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
                ol_flags |= PKT_TX_VLAN_PKT;
        if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
                ol_flags |= PKT_TX_QINQ_PKT;
        if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
                ol_flags |= PKT_TX_MACSEC;

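        /*
         * Build the first packet of each clone group from scratch; for the
         * next nb_clones packets, reuse the same mbuf and only bump its
         * reference count, so the PMD frees it once per transmitted copy.
         */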
        for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
                if (!nb_pkt || !nb_clones) {
                        nb_clones = nb_pkt_flowgen_clones;
                        /* Logic limitation: cannot clone more packets than fit in one burst. */
                        if (nb_clones > nb_pkt_per_burst)
                                nb_clones = nb_pkt_per_burst;

                        pkt = rte_mbuf_raw_alloc(mbp);
                        if (!pkt)
                                break;

                        pkt->data_len = pkt_size;
                        pkt->next = NULL;

                        /* Initialize Ethernet header. */
                        eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
                        rte_ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
                        rte_ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
                        eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

                        /* Initialize IP header. */
                        ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
                        memset(ip_hdr, 0, sizeof(*ip_hdr));
                        ip_hdr->version_ihl     = RTE_IPV4_VHL_DEF;
                        ip_hdr->type_of_service = 0;
                        ip_hdr->fragment_offset = 0;
                        ip_hdr->time_to_live    = IP_DEFTTL;
                        ip_hdr->next_proto_id   = IPPROTO_UDP;
                        ip_hdr->packet_id       = 0;
                        ip_hdr->src_addr        = rte_cpu_to_be_32(cfg_ip_src);
                        ip_hdr->dst_addr        = rte_cpu_to_be_32(cfg_ip_dst +
                                                                   next_flow);
                        ip_hdr->total_length    = RTE_CPU_TO_BE_16(pkt_size -
                                                                   sizeof(*eth_hdr));
                        ip_hdr->hdr_checksum    = ip_sum((const alias_int16_t *)ip_hdr,
                                                         sizeof(*ip_hdr));

                        /* Initialize UDP header. */
                        udp_hdr = (struct rte_udp_hdr *)(ip_hdr + 1);
                        udp_hdr->src_port       = rte_cpu_to_be_16(cfg_udp_src);
                        udp_hdr->dst_port       = rte_cpu_to_be_16(cfg_udp_dst);
                        udp_hdr->dgram_cksum    = 0; /* No UDP checksum. */
                        udp_hdr->dgram_len      = RTE_CPU_TO_BE_16(pkt_size -
                                                                   sizeof(*eth_hdr) -
                                                                   sizeof(*ip_hdr));
                        pkt->nb_segs            = 1;
                        pkt->pkt_len            = pkt_size;
                        /* Keep only the external-buffer flag; clear everything else. */
                        pkt->ol_flags           &= EXT_ATTACHED_MBUF;
                        pkt->ol_flags           |= ol_flags;
                        pkt->vlan_tci           = vlan_tci;
                        pkt->vlan_tci_outer     = vlan_tci_outer;
                        pkt->l2_len             = sizeof(struct rte_ether_hdr);
                        pkt->l3_len             = sizeof(struct rte_ipv4_hdr);
                } else {
                        /* Clone: reuse the previous packet, headers included. */
                        nb_clones--;
                        rte_mbuf_refcnt_update(pkt, 1);
                }
                pkts_burst[nb_pkt] = pkt;

                next_flow = (next_flow + 1) % cfg_n_flows;
        }
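
        /*
         * Note: next_flow advances once per packet, clones included; the
         * back-out logic below relies on exactly that when a burst is only
         * partially sent.
         */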

        nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
        /* Retry sending if the device could not take the whole burst. */
        if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
                retry = 0;
                while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
                        rte_delay_us(burst_tx_delay_time);
                        nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
                                        &pkts_burst[nb_tx], nb_pkt - nb_tx);
                }
        }
        fs->tx_packets += nb_tx;

        inc_tx_burst_stats(fs, nb_tx);
        if (unlikely(nb_tx < nb_pkt)) {
                /* Back out the flow counter. */
                next_flow -= (nb_pkt - nb_tx);
                while (next_flow < 0)
                        next_flow += cfg_n_flows;

                /* Free the packets that were never transmitted. */
                do {
                        rte_pktmbuf_free(pkts_burst[nb_tx]);
                } while (++nb_tx < nb_pkt);
        }

        get_end_cycles(fs, start_tsc);
}

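/*
 * Forwarding engine descriptor; selected at runtime with the testpmd
 * command "set fwd flowgen".
 */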
struct fwd_engine flow_gen_engine = {
        .fwd_mode_name  = "flowgen",
        .port_fwd_begin = NULL,
        .port_fwd_end   = NULL,
        .packet_fwd     = pkt_burst_flow_gen,
};