dpdk/lib/port/rte_port_ras.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>

#include <rte_ip_frag.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_malloc.h>

#include "rte_port_ras.h"

#ifndef RTE_PORT_RAS_N_BUCKETS
#define RTE_PORT_RAS_N_BUCKETS                                 4094
#endif

#ifndef RTE_PORT_RAS_N_ENTRIES_PER_BUCKET
#define RTE_PORT_RAS_N_ENTRIES_PER_BUCKET                      8
#endif

#ifndef RTE_PORT_RAS_N_ENTRIES
#define RTE_PORT_RAS_N_ENTRIES (RTE_PORT_RAS_N_BUCKETS * RTE_PORT_RAS_N_ENTRIES_PER_BUCKET)
#endif

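/*
 * Output port statistics (packets in / packets dropped) are collected only
 * when RTE_PORT_STATS_COLLECT is defined; otherwise the macros below expand
 * to nothing.
 */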
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(port, val) \
        port->stats.n_pkts_in += val
#define RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(port, val) \
        port->stats.n_pkts_drop += val

#else

#define RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_ring_writer_ras;

typedef void (*ras_op)(
                struct rte_port_ring_writer_ras *p,
                struct rte_mbuf *pkt);

static void
process_ipv4(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt);
static void
process_ipv6(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt);

struct rte_port_ring_writer_ras {
        struct rte_port_out_stats stats;

        struct rte_mbuf *tx_buf[RTE_PORT_IN_BURST_SIZE_MAX];
        struct rte_ring *ring;
        uint32_t tx_burst_sz;
        uint32_t tx_buf_count;
        struct rte_ip_frag_tbl *frag_tbl;
        struct rte_ip_frag_death_row death_row;

        ras_op f_ras;
};

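/*
 * Common back end for the IPv4 and IPv6 RAS output ports; is_ipv4 selects
 * the per-packet reassembly handler installed in f_ras.
 */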
static void *
rte_port_ring_writer_ras_create(void *params, int socket_id, int is_ipv4)
{
        struct rte_port_ring_writer_ras_params *conf =
                        params;
        struct rte_port_ring_writer_ras *port;
        uint64_t frag_cycles;

        /* Check input parameters */
        if (conf == NULL) {
                RTE_LOG(ERR, PORT, "%s: Parameter conf is NULL\n", __func__);
                return NULL;
        }
        if (conf->ring == NULL) {
                RTE_LOG(ERR, PORT, "%s: Parameter ring is NULL\n", __func__);
                return NULL;
        }
        if ((conf->tx_burst_sz == 0) ||
            (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
                RTE_LOG(ERR, PORT, "%s: Parameter tx_burst_sz is invalid\n",
                        __func__);
                return NULL;
        }

        /* Memory allocation */
        port = rte_zmalloc_socket("PORT", sizeof(*port),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (port == NULL) {
                RTE_LOG(ERR, PORT, "%s: Failed to allocate port memory\n",
                        __func__);
                return NULL;
        }

        /* Create fragmentation table */
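        /*
         * Fragment timeout: the TSC frequency rounded up to a multiple of
         * 1000 cycles, then scaled by 100, i.e. roughly 100 seconds' worth
         * of cycles before an incomplete packet is aged out of the table.
         */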
        frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S * MS_PER_S;
        frag_cycles *= 100;

        port->frag_tbl = rte_ip_frag_table_create(
                RTE_PORT_RAS_N_BUCKETS,
                RTE_PORT_RAS_N_ENTRIES_PER_BUCKET,
                RTE_PORT_RAS_N_ENTRIES,
                frag_cycles,
                socket_id);

        if (port->frag_tbl == NULL) {
                RTE_LOG(ERR, PORT, "%s: rte_ip_frag_table_create failed\n",
                        __func__);
                rte_free(port);
                return NULL;
        }

        /* Initialization */
        port->ring = conf->ring;
        port->tx_burst_sz = conf->tx_burst_sz;
        port->tx_buf_count = 0;

        port->f_ras = (is_ipv4 == 1) ? process_ipv4 : process_ipv6;

        return port;
}

static void *
rte_port_ring_writer_ipv4_ras_create(void *params, int socket_id)
{
        return rte_port_ring_writer_ras_create(params, socket_id, 1);
}

static void *
rte_port_ring_writer_ipv6_ras_create(void *params, int socket_id)
{
        return rte_port_ring_writer_ras_create(params, socket_id, 0);
}

static inline void
send_burst(struct rte_port_ring_writer_ras *p)
{
        uint32_t nb_tx;

        nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
                        p->tx_buf_count, NULL);

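        /* Packets the ring did not accept are counted as drops and freed. */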
        RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
        for ( ; nb_tx < p->tx_buf_count; nb_tx++)
                rte_pktmbuf_free(p->tx_buf[nb_tx]);

        p->tx_buf_count = 0;
}

static void
process_ipv4(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt)
{
        /* Assume there is no ethernet header */
        struct rte_ipv4_hdr *pkt_hdr =
                rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);

        /* Get "More fragments" flag and fragment offset */
        uint16_t frag_field = rte_be_to_cpu_16(pkt_hdr->fragment_offset);
        uint16_t frag_offset = (uint16_t)(frag_field & RTE_IPV4_HDR_OFFSET_MASK);
        uint16_t frag_flag = (uint16_t)(frag_field & RTE_IPV4_HDR_MF_FLAG);

        /* If it is a fragmented packet, then try to reassemble */
        if ((frag_flag == 0) && (frag_offset == 0))
                p->tx_buf[p->tx_buf_count++] = pkt;
        else {
                struct rte_mbuf *mo;
                struct rte_ip_frag_tbl *tbl = p->frag_tbl;
                struct rte_ip_frag_death_row *dr = &p->death_row;

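                /*
                 * The reassembly code reads l3_len; no L2 header is assumed
                 * on this path.
                 */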
                pkt->l3_len = sizeof(*pkt_hdr);

                /* Process this fragment */
                mo = rte_ipv4_frag_reassemble_packet(tbl, dr, pkt, rte_rdtsc(),
                                pkt_hdr);
                if (mo != NULL)
                        p->tx_buf[p->tx_buf_count++] = mo;

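                /*
                 * Release the mbufs queued on the death row; 3 is the number
                 * of entries to prefetch ahead while freeing.
                 */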
                rte_ip_frag_free_death_row(&p->death_row, 3);
        }
}

static void
process_ipv6(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt)
{
        /* Assume there is no ethernet header */
        struct rte_ipv6_hdr *pkt_hdr =
                rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);

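        /*
         * Look for a fragment extension header; frag_data stays 0 ("not
         * fragmented") when none is present.
         */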
        struct rte_ipv6_fragment_ext *frag_hdr;
        uint16_t frag_data = 0;
        frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(pkt_hdr);
        if (frag_hdr != NULL)
                frag_data = rte_be_to_cpu_16(frag_hdr->frag_data);

        /* If it is a fragmented packet, then try to reassemble */
        if ((frag_data & RTE_IPV6_FRAG_USED_MASK) == 0)
                p->tx_buf[p->tx_buf_count++] = pkt;
        else {
                struct rte_mbuf *mo;
                struct rte_ip_frag_tbl *tbl = p->frag_tbl;
                struct rte_ip_frag_death_row *dr = &p->death_row;

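                /*
                 * L3 length covers the base IPv6 header plus the fragment
                 * extension header.
                 */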
                pkt->l3_len = sizeof(*pkt_hdr) + sizeof(*frag_hdr);

                /* Process this fragment */
                mo = rte_ipv6_frag_reassemble_packet(tbl, dr, pkt, rte_rdtsc(), pkt_hdr,
                                frag_hdr);
                if (mo != NULL)
                        p->tx_buf[p->tx_buf_count++] = mo;

                rte_ip_frag_free_death_row(&p->death_row, 3);
        }
}

static int
rte_port_ring_writer_ras_tx(void *port, struct rte_mbuf *pkt)
{
        struct rte_port_ring_writer_ras *p =
                        port;

        RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(p, 1);
        p->f_ras(p, pkt);
        if (p->tx_buf_count >= p->tx_burst_sz)
                send_burst(p);

        return 0;
}

static int
rte_port_ring_writer_ras_tx_bulk(void *port,
                struct rte_mbuf **pkts,
                uint64_t pkts_mask)
{
        struct rte_port_ring_writer_ras *p =
                        port;

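        /*
         * pkts_mask of the form 2^n - 1: all valid packets are contiguous
         * from index 0, so they can be walked sequentially.
         */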
        if ((pkts_mask & (pkts_mask + 1)) == 0) {
                uint64_t n_pkts = __builtin_popcountll(pkts_mask);
                uint32_t i;

                for (i = 0; i < n_pkts; i++) {
                        struct rte_mbuf *pkt = pkts[i];

                        RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(p, 1);
                        p->f_ras(p, pkt);
                        if (p->tx_buf_count >= p->tx_burst_sz)
                                send_burst(p);
                }
        } else {
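                /*
                 * Sparse mask: extract one packet index at a time with
                 * count-trailing-zeros and clear its bit once handled.
                 */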
                for ( ; pkts_mask; ) {
                        uint32_t pkt_index = __builtin_ctzll(pkts_mask);
                        uint64_t pkt_mask = 1LLU << pkt_index;
                        struct rte_mbuf *pkt = pkts[pkt_index];

                        RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(p, 1);
                        p->f_ras(p, pkt);
                        if (p->tx_buf_count >= p->tx_burst_sz)
                                send_burst(p);

                        pkts_mask &= ~pkt_mask;
                }
        }

        return 0;
}

static int
rte_port_ring_writer_ras_flush(void *port)
{
        struct rte_port_ring_writer_ras *p =
                        port;

        if (p->tx_buf_count > 0)
                send_burst(p);

        return 0;
}

static int
rte_port_ring_writer_ras_free(void *port)
{
        struct rte_port_ring_writer_ras *p =
                        port;

        if (port == NULL) {
                RTE_LOG(ERR, PORT, "%s: Parameter port is NULL\n", __func__);
                return -1;
        }

        rte_port_ring_writer_ras_flush(port);
        rte_ip_frag_table_destroy(p->frag_tbl);
        rte_free(port);

        return 0;
}

static int
rte_port_ras_writer_stats_read(void *port,
                struct rte_port_out_stats *stats, int clear)
{
        struct rte_port_ring_writer_ras *p =
                port;

        if (stats != NULL)
                memcpy(stats, &p->stats, sizeof(p->stats));

        if (clear)
                memset(&p->stats, 0, sizeof(p->stats));

        return 0;
}

/*
 * Summary of port operations
 */
struct rte_port_out_ops rte_port_ring_writer_ipv4_ras_ops = {
        .f_create = rte_port_ring_writer_ipv4_ras_create,
        .f_free = rte_port_ring_writer_ras_free,
        .f_tx = rte_port_ring_writer_ras_tx,
        .f_tx_bulk = rte_port_ring_writer_ras_tx_bulk,
        .f_flush = rte_port_ring_writer_ras_flush,
        .f_stats = rte_port_ras_writer_stats_read,
};

struct rte_port_out_ops rte_port_ring_writer_ipv6_ras_ops = {
        .f_create = rte_port_ring_writer_ipv6_ras_create,
        .f_free = rte_port_ring_writer_ras_free,
        .f_tx = rte_port_ring_writer_ras_tx,
        .f_tx_bulk = rte_port_ring_writer_ras_tx_bulk,
        .f_flush = rte_port_ring_writer_ras_flush,
        .f_stats = rte_port_ras_writer_stats_read,
};

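/*
 * Minimal usage sketch (illustrative only: "tx_ring", "socket_id" and "mbuf"
 * are placeholders supplied by the caller; the conf fields mirror
 * struct rte_port_ring_writer_ras_params as used in the create path above):
 *
 *      struct rte_port_ring_writer_ras_params conf = {
 *              .ring = tx_ring,
 *              .tx_burst_sz = 32,
 *      };
 *      void *port = rte_port_ring_writer_ipv4_ras_ops.f_create(&conf,
 *                      socket_id);
 *
 *      rte_port_ring_writer_ipv4_ras_ops.f_tx(port, mbuf);
 *      rte_port_ring_writer_ipv4_ras_ops.f_flush(port);
 *      rte_port_ring_writer_ipv4_ras_ops.f_free(port);
 */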