dpdk/lib/gro/gro_tcp4.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "gro_tcp4.h"

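/*
 * Allocate a TCP/IPv4 reassembly table on the given NUMA socket. The
 * item and flow arrays both hold max_flow_num * max_item_per_flow
 * entries, capped at GRO_TCP4_TBL_MAX_ITEM_NUM. Return NULL on failure.
 */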
void *
gro_tcp4_tbl_create(uint16_t socket_id,
                uint16_t max_flow_num,
                uint16_t max_item_per_flow)
{
        struct gro_tcp4_tbl *tbl;
        size_t size;
        uint32_t entries_num, i;

        entries_num = max_flow_num * max_item_per_flow;
        entries_num = RTE_MIN(entries_num, GRO_TCP4_TBL_MAX_ITEM_NUM);

        if (entries_num == 0)
                return NULL;

        tbl = rte_zmalloc_socket(__func__,
                        sizeof(struct gro_tcp4_tbl),
                        RTE_CACHE_LINE_SIZE,
                        socket_id);
        if (tbl == NULL)
                return NULL;

        size = sizeof(struct gro_tcp4_item) * entries_num;
        tbl->items = rte_zmalloc_socket(__func__,
                        size,
                        RTE_CACHE_LINE_SIZE,
                        socket_id);
        if (tbl->items == NULL) {
                rte_free(tbl);
                return NULL;
        }
        tbl->max_item_num = entries_num;

        size = sizeof(struct gro_tcp4_flow) * entries_num;
        tbl->flows = rte_zmalloc_socket(__func__,
                        size,
                        RTE_CACHE_LINE_SIZE,
                        socket_id);
        if (tbl->flows == NULL) {
                rte_free(tbl->items);
                rte_free(tbl);
                return NULL;
        }
        /* INVALID_ARRAY_INDEX indicates an empty flow */
        for (i = 0; i < entries_num; i++)
                tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
        tbl->max_flow_num = entries_num;

        return tbl;
}

void
gro_tcp4_tbl_destroy(void *tbl)
{
        struct gro_tcp4_tbl *tcp_tbl = tbl;

        if (tcp_tbl) {
                rte_free(tcp_tbl->items);
                rte_free(tcp_tbl->flows);
        }
        rte_free(tcp_tbl);
}

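/*
 * Return the index of an unused item, or INVALID_ARRAY_INDEX if the
 * item array is full. A NULL firstseg pointer marks an unused item.
 */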
static inline uint32_t
find_an_empty_item(struct gro_tcp4_tbl *tbl)
{
        uint32_t i;
        uint32_t max_item_num = tbl->max_item_num;

        for (i = 0; i < max_item_num; i++)
                if (tbl->items[i].firstseg == NULL)
                        return i;
        return INVALID_ARRAY_INDEX;
}

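/*
 * Return the index of an unused flow, or INVALID_ARRAY_INDEX if the
 * flow array is full. A start_index of INVALID_ARRAY_INDEX marks an
 * unused flow.
 */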
static inline uint32_t
find_an_empty_flow(struct gro_tcp4_tbl *tbl)
{
        uint32_t i;
        uint32_t max_flow_num = tbl->max_flow_num;

        for (i = 0; i < max_flow_num; i++)
                if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
                        return i;
        return INVALID_ARRAY_INDEX;
}

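/*
 * Store a packet as a new item. If prev_idx is valid, link the new
 * item into the flow's item chain right after prev_idx. Return the new
 * item's index, or INVALID_ARRAY_INDEX if the item array is full.
 */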
static inline uint32_t
insert_new_item(struct gro_tcp4_tbl *tbl,
                struct rte_mbuf *pkt,
                uint64_t start_time,
                uint32_t prev_idx,
                uint32_t sent_seq,
                uint16_t ip_id,
                uint8_t is_atomic)
{
        uint32_t item_idx;

        item_idx = find_an_empty_item(tbl);
        if (item_idx == INVALID_ARRAY_INDEX)
                return INVALID_ARRAY_INDEX;

        tbl->items[item_idx].firstseg = pkt;
        tbl->items[item_idx].lastseg = rte_pktmbuf_lastseg(pkt);
        tbl->items[item_idx].start_time = start_time;
        tbl->items[item_idx].next_pkt_idx = INVALID_ARRAY_INDEX;
        tbl->items[item_idx].sent_seq = sent_seq;
        tbl->items[item_idx].ip_id = ip_id;
        tbl->items[item_idx].nb_merged = 1;
        tbl->items[item_idx].is_atomic = is_atomic;
        tbl->item_num++;

        /* if the previous packet exists, chain them together. */
        if (prev_idx != INVALID_ARRAY_INDEX) {
                tbl->items[item_idx].next_pkt_idx =
                        tbl->items[prev_idx].next_pkt_idx;
                tbl->items[prev_idx].next_pkt_idx = item_idx;
        }

        return item_idx;
}

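/*
 * Free the given item and unlink it from its flow's item chain. Return
 * the index of the next item in the chain.
 */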
static inline uint32_t
delete_item(struct gro_tcp4_tbl *tbl, uint32_t item_idx,
                uint32_t prev_item_idx)
{
        uint32_t next_idx = tbl->items[item_idx].next_pkt_idx;

        /* NULL indicates an empty item */
        tbl->items[item_idx].firstseg = NULL;
        tbl->item_num--;
        if (prev_item_idx != INVALID_ARRAY_INDEX)
                tbl->items[prev_item_idx].next_pkt_idx = next_idx;

        return next_idx;
}

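/*
 * Create a new flow from the given key, with item_idx as its first
 * item. Return the flow's index, or INVALID_ARRAY_INDEX if the flow
 * array is full.
 */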
static inline uint32_t
insert_new_flow(struct gro_tcp4_tbl *tbl,
                struct tcp4_flow_key *src,
                uint32_t item_idx)
{
        struct tcp4_flow_key *dst;
        uint32_t flow_idx;

        flow_idx = find_an_empty_flow(tbl);
        if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
                return INVALID_ARRAY_INDEX;

        dst = &(tbl->flows[flow_idx].key);

        rte_ether_addr_copy(&(src->eth_saddr), &(dst->eth_saddr));
        rte_ether_addr_copy(&(src->eth_daddr), &(dst->eth_daddr));
        dst->ip_src_addr = src->ip_src_addr;
        dst->ip_dst_addr = src->ip_dst_addr;
        dst->recv_ack = src->recv_ack;
        dst->src_port = src->src_port;
        dst->dst_port = src->dst_port;

        tbl->flows[flow_idx].start_index = item_idx;
        tbl->flow_num++;

        return flow_idx;
}

/*
 * Update the IPv4 total_length field of a merged packet before it is
 * flushed, so that the header matches the new packet length.
 */
static inline void
update_header(struct gro_tcp4_item *item)
{
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_mbuf *pkt = item->firstseg;

        ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
                        pkt->l2_len);
        ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
                        pkt->l2_len);
}

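/*
 * Reassemble an incoming TCP/IPv4 packet. Return 1 if the packet is
 * merged into an existing one, 0 if it is stored in the table as a new
 * item, and -1 if the packet can't be processed by GRO (e.g. unexpected
 * TCP flags, empty payload, or a full table) and should be delivered to
 * the application immediately.
 */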
int32_t
gro_tcp4_reassemble(struct rte_mbuf *pkt,
                struct gro_tcp4_tbl *tbl,
                uint64_t start_time)
{
        struct rte_ether_hdr *eth_hdr;
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_tcp_hdr *tcp_hdr;
        uint32_t sent_seq;
        int32_t tcp_dl;
        uint16_t ip_id, hdr_len, frag_off;
        uint8_t is_atomic;

        struct tcp4_flow_key key;
        uint32_t cur_idx, prev_idx, item_idx;
        uint32_t i, max_flow_num, remaining_flow_num;
        int cmp;
        uint8_t find;

        /*
         * Don't process packets whose TCP header length is greater
         * than 60 bytes or less than 20 bytes.
         */
        if (unlikely(INVALID_TCP_HDRLEN(pkt->l4_len)))
                return -1;

        eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
        ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
        tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
        hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;

        /*
         * Don't process packets with the FIN, SYN, RST, PSH, URG, ECE
         * or CWR flag set.
         */
        if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
                return -1;
        /*
         * Don't process packets whose payload length is less than or
         * equal to 0.
         */
        tcp_dl = pkt->pkt_len - hdr_len;
        if (tcp_dl <= 0)
                return -1;

        /*
         * Save the IPv4 ID if the packet's DF bit is 0. If the DF bit
         * is 1, the IPv4 ID is ignored.
         */
        frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
        is_atomic = (frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
        ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);
        sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);

        rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.eth_saddr));
        rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.eth_daddr));
        key.ip_src_addr = ipv4_hdr->src_addr;
        key.ip_dst_addr = ipv4_hdr->dst_addr;
        key.src_port = tcp_hdr->src_port;
        key.dst_port = tcp_hdr->dst_port;
        key.recv_ack = tcp_hdr->recv_ack;

        /* Search for a matching flow. */
        max_flow_num = tbl->max_flow_num;
        remaining_flow_num = tbl->flow_num;
        find = 0;
        for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
                if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
                        if (is_same_tcp4_flow(tbl->flows[i].key, key)) {
                                find = 1;
                                break;
                        }
                        remaining_flow_num--;
                }
        }

        /*
         * Failed to find a matching flow. Insert a new flow and store
         * the packet in it.
         */
        if (find == 0) {
                item_idx = insert_new_item(tbl, pkt, start_time,
                                INVALID_ARRAY_INDEX, sent_seq, ip_id,
                                is_atomic);
                if (item_idx == INVALID_ARRAY_INDEX)
                        return -1;
                if (insert_new_flow(tbl, &key, item_idx) ==
                                INVALID_ARRAY_INDEX) {
                        /*
                         * Failed to insert a new flow, so delete the
                         * stored packet.
                         */
                        delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
                        return -1;
                }
                return 0;
        }

        /*
         * Check all packets in the flow and try to find a neighbor for
         * the input packet.
         */
        cur_idx = tbl->flows[i].start_index;
        prev_idx = cur_idx;
        do {
                cmp = check_seq_option(&(tbl->items[cur_idx]), tcp_hdr,
                                sent_seq, ip_id, pkt->l4_len, tcp_dl, 0,
                                is_atomic);
                if (cmp) {
                        if (merge_two_tcp4_packets(&(tbl->items[cur_idx]),
                                                pkt, cmp, sent_seq, ip_id, 0))
                                return 1;
                        /*
                         * Failed to merge the two packets, as the merged
                         * packet would exceed the maximum length. Store
                         * the packet in the flow.
                         */
                        if (insert_new_item(tbl, pkt, start_time, prev_idx,
                                                sent_seq, ip_id, is_atomic) ==
                                        INVALID_ARRAY_INDEX)
                                return -1;
                        return 0;
                }
                prev_idx = cur_idx;
                cur_idx = tbl->items[cur_idx].next_pkt_idx;
        } while (cur_idx != INVALID_ARRAY_INDEX);

        /* Failed to find a neighbor, so store the packet in the flow. */
        if (insert_new_item(tbl, pkt, start_time, prev_idx, sent_seq,
                                ip_id, is_atomic) == INVALID_ARRAY_INDEX)
                return -1;

        return 0;
}

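/*
 * Flush all packets whose arrival time is no later than
 * flush_timestamp into the out[] array, updating the IPv4 header of
 * every merged packet on the way out. Return the number of flushed
 * packets, which is at most nb_out.
 */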
uint16_t
gro_tcp4_tbl_timeout_flush(struct gro_tcp4_tbl *tbl,
                uint64_t flush_timestamp,
                struct rte_mbuf **out,
                uint16_t nb_out)
{
        uint16_t k = 0;
        uint32_t i, j;
        uint32_t max_flow_num = tbl->max_flow_num;

        for (i = 0; i < max_flow_num; i++) {
                if (unlikely(tbl->flow_num == 0))
                        return k;

                j = tbl->flows[i].start_index;
                while (j != INVALID_ARRAY_INDEX) {
                        if (tbl->items[j].start_time <= flush_timestamp) {
                                out[k++] = tbl->items[j].firstseg;
                                if (tbl->items[j].nb_merged > 1)
                                        update_header(&(tbl->items[j]));
                                /*
                                 * Delete the packet and get the next
                                 * packet in the flow.
                                 */
                                j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
                                tbl->flows[i].start_index = j;
                                if (j == INVALID_ARRAY_INDEX)
                                        tbl->flow_num--;

                                if (unlikely(k == nb_out))
                                        return k;
                        } else
                                /*
                                 * The remaining packets in this flow
                                 * won't time out. Check other flows.
                                 */
                                break;
                }
        }
        return k;
}

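/* Return the number of packets currently stored in the table. */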
uint32_t
gro_tcp4_tbl_pkt_count(void *tbl)
{
        struct gro_tcp4_tbl *gro_tbl = tbl;

        if (gro_tbl)
                return gro_tbl->item_num;

        return 0;
}

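/*
 * Minimal usage sketch (illustration only, not part of the upstream
 * file). It assumes each input mbuf already has l2_len, l3_len and
 * l4_len set, and that pkt and timeout_cycles are provided by the
 * caller; the table capacities below are arbitrary:
 *
 *        struct gro_tcp4_tbl *tbl;
 *        struct rte_mbuf *out[64];
 *        uint16_t nb_out;
 *        uint64_t now = rte_rdtsc();
 *
 *        tbl = gro_tcp4_tbl_create(rte_socket_id(), 16, 4);
 *        if (gro_tcp4_reassemble(pkt, tbl, now) < 0) {
 *                // unprocessable packet: deliver pkt as-is
 *        }
 *        // flush packets that have stayed in the table for at least
 *        // timeout_cycles TSC cycles
 *        nb_out = gro_tcp4_tbl_timeout_flush(tbl, now - timeout_cycles,
 *                        out, RTE_DIM(out));
 *        gro_tcp4_tbl_destroy(tbl);
 */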