dpdk/drivers/net/octeontx/octeontx_rxtx.h
<<
>>
Prefs
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2017 Cavium, Inc
   3 */
   4
   5#ifndef __OCTEONTX_RXTX_H__
   6#define __OCTEONTX_RXTX_H__
   7
   8#include <ethdev_driver.h>
   9
/* Fields expanded inside the Rx/Tx queue structures so each queue
 * carries the bitmask of offloads enabled on it.
 */
#define OFFLOAD_FLAGS                                   \
        uint16_t rx_offload_flags;                      \
        uint16_t tx_offload_flags

#define BIT(nr) (1UL << (nr))

/* Rx offload flags (values for rx_offload_flags) */
#define OCCTX_RX_OFFLOAD_NONE           (0)
#define OCCTX_RX_MULTI_SEG_F            BIT(0)
#define OCCTX_RX_OFFLOAD_CSUM_F         BIT(1)
#define OCCTX_RX_VLAN_FLTR_F            BIT(2)

/* Tx offload flags (values for tx_offload_flags) */
#define OCCTX_TX_OFFLOAD_NONE           (0)
#define OCCTX_TX_MULTI_SEG_F            BIT(0)
#define OCCTX_TX_OFFLOAD_L3_L4_CSUM_F   BIT(1)
#define OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(2)
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)

/* Packet type table, indexed by the PKI LC/LE/LF layer types */
#define PTYPE_SIZE      OCCTX_PKI_LTYPE_LAST
  29
/* octeontx send header sub descriptor, word 0 (hardware-defined
 * bit layout; accessed either as a raw 64-bit word or via the
 * bit-fields below — do not reorder or resize fields).
 */
RTE_STD_C11
union octeontx_send_hdr_w0_u {
        uint64_t u;
        struct {
                uint64_t total   : 16; /* total frame length in bytes */
                uint64_t markptr : 8;
                uint64_t l3ptr   : 8;  /* byte offset of the (outer) L3 header */
                uint64_t l4ptr   : 8;  /* byte offset of the (outer) L4 header */
                uint64_t ii      : 1;  /* "ignore I": see SG_DESC[I]/DF notes below */
                uint64_t shp_dis : 1;
                uint64_t ckle    : 1;  /* enable inner L3 (IPv4) checksum; offset in w1.leptr */
                uint64_t cklf    : 2;  /* inner L4 checksum algorithm; offset in w1.lfptr */
                uint64_t ckl3    : 1;  /* enable L3 (IPv4 header) checksum at l3ptr */
                uint64_t ckl4    : 2;  /* L4 checksum algorithm: 1=UDP 2=TCP 3=SCTP */
                uint64_t p       : 1;
                uint64_t format  : 7;
                uint64_t tstamp  : 1;
                uint64_t tso_eom : 1;
                uint64_t df      : 1;  /* "don't free": PKO keeps the packet buffer */
                uint64_t tso     : 1;
                uint64_t n2      : 1;
                uint64_t scntn1  : 3;
        };
};
  55
/* octeontx send header sub descriptor, word 1 (hardware-defined
 * bit layout; TSO and inner-header checksum offsets).
 */
RTE_STD_C11
union octeontx_send_hdr_w1_u {
        uint64_t u;
        struct {
                uint64_t tso_mss : 14;
                uint64_t shp_ra  : 2;
                uint64_t tso_sb  : 8;
                uint64_t leptr   : 8; /* byte offset of the inner L3 header */
                uint64_t lfptr   : 8; /* byte offset of the inner L4 header */
                uint64_t shp_chg : 9;
                uint64_t tso_fn  : 7;
                uint64_t l2len   : 8;
        };
};
  70
/* Complete two-word PKO send-header sub descriptor; overlays the
 * first two 64-bit words of the command buffer.
 */
struct octeontx_send_hdr_s {
        union octeontx_send_hdr_w0_u w0;
        union octeontx_send_hdr_w1_u w1;
};
  75
  76static const uint32_t __rte_cache_aligned
  77ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
  78        [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
  79        [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
  80        [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
  81        [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
  82        [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
  83        [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
  84        [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
  85        [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
  86        [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
  87        [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
  88
  89        [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
  90        [LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
  91                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
  92        [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
  93        [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
  94        [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
  95        [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
  96        [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
  97        [LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
  98                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
  99        [LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
 100                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
 101        [LC_IPV4][LE_NONE][LF_NVGRE] =
 102                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
 103
 104        [LC_IPV4_OPT][LE_NONE][LF_NONE] =
 105                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
 106        [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
 107                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
 108        [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
 109                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
 110        [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
 111                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
 112        [LC_IPV4_OPT][LE_NONE][LF_TCP] =
 113                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
 114        [LC_IPV4_OPT][LE_NONE][LF_UDP] =
 115                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
 116        [LC_IPV4_OPT][LE_NONE][LF_GRE] =
 117                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
 118        [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
 119                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
 120        [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
 121                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
 122        [LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
 123                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
 124
 125        [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
 126        [LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
 127                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
 128        [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
 129        [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
 130        [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
 131        [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
 132        [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
 133        [LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
 134                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
 135        [LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
 136                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
 137        [LC_IPV6][LE_NONE][LF_NVGRE] =
 138                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
 139        [LC_IPV6_OPT][LE_NONE][LF_NONE] =
 140                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
 141        [LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
 142                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
 143        [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
 144                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
 145        [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
 146                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
 147        [LC_IPV6_OPT][LE_NONE][LF_TCP] =
 148                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
 149        [LC_IPV6_OPT][LE_NONE][LF_UDP] =
 150                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
 151        [LC_IPV6_OPT][LE_NONE][LF_GRE] =
 152                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
 153        [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
 154                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
 155        [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
 156                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
 157        [LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
 158                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
 159
 160};
 161
 162
/*
 * Detach the indirect mbuf @m from the direct mbuf backing it, restore
 * @m to a self-contained (direct) empty state, and free @m.
 *
 * Returns 0 when the direct mbuf's refcount dropped to zero — it has
 * been re-initialized and the caller may hand its buffer to hardware —
 * or 1 when the direct mbuf is still referenced elsewhere and must not
 * be reused.
 */
static __rte_always_inline uint64_t
octeontx_pktmbuf_detach(struct rte_mbuf *m)
{
        struct rte_mempool *mp = m->pool;
        uint32_t mbuf_size, buf_len;
        struct rte_mbuf *md;
        uint16_t priv_size;
        uint16_t refcount;

        /* Update refcount of direct mbuf */
        md = rte_mbuf_from_indirect(m);
        refcount = rte_mbuf_refcnt_update(md, -1);

        priv_size = rte_pktmbuf_priv_size(mp);
        mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
        buf_len = rte_pktmbuf_data_room_size(mp);

        /* Point @m back at its own embedded data room so it no longer
         * references the direct mbuf's buffer.
         */
        m->priv_size = priv_size;
        m->buf_addr = (char *)m + mbuf_size;
        m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
        m->buf_len = (uint16_t)buf_len;
        rte_pktmbuf_reset_headroom(m);
        m->data_len = 0;
        m->ol_flags = 0;
        m->next = NULL;
        m->nb_segs = 1;

        /* Now indirect mbuf is safe to free */
        rte_pktmbuf_free(m);

        if (refcount == 0) {
                /* Last reference dropped above: reset the direct mbuf
                 * so its buffer can be recycled by the caller.
                 */
                rte_mbuf_refcnt_set(md, 1);
                md->data_len = 0;
                md->ol_flags = 0;
                md->next = NULL;
                md->nb_segs = 1;
                return 0;
        } else {
                return 1;
        }
}
 204
 205static __rte_always_inline uint64_t
 206octeontx_prefree_seg(struct rte_mbuf *m)
 207{
 208        if (likely(rte_mbuf_refcnt_read(m) == 1)) {
 209                if (!RTE_MBUF_DIRECT(m))
 210                        return octeontx_pktmbuf_detach(m);
 211
 212                m->next = NULL;
 213                m->nb_segs = 1;
 214                return 0;
 215        } else if (rte_mbuf_refcnt_update(m, -1) == 0) {
 216                if (!RTE_MBUF_DIRECT(m))
 217                        return octeontx_pktmbuf_detach(m);
 218
 219                rte_mbuf_refcnt_set(m, 1);
 220                m->next = NULL;
 221                m->nb_segs = 1;
 222                return 0;
 223        }
 224
 225        /* Mbuf is having refcount more than 1 so need not to be freed */
 226        return 1;
 227}
 228
/*
 * Program the PKO send-header checksum fields from the mbuf's Tx
 * offload flags. @cmd_buf must already hold the two SEND_HDR_S words.
 * Outer-header fields (l3ptr/l4ptr/ckl3/ckl4) and inner-header fields
 * (leptr/lfptr/ckle/cklf) are filled depending on which of the
 * OL3_OL4 / L3_L4 offload flags are compiled in and whether the packet
 * carries tunnel flags.
 */
static __rte_always_inline void
octeontx_tx_checksum_offload(uint64_t *cmd_buf, const uint16_t flags,
                             struct rte_mbuf *m)
{
        struct octeontx_send_hdr_s *send_hdr =
                                (struct octeontx_send_hdr_s *)cmd_buf;
        uint64_t ol_flags = m->ol_flags;

        /* PKO Checksum L4 Algorithm Enumeration
         * 0x0 - No checksum
         * 0x1 - UDP L4 checksum
         * 0x2 - TCP L4 checksum
         * 0x3 - SCTP L4 checksum
         */
        /* The PKT_TX_L4_MASK field lives at bits 52-53; the XOR/shift
         * tests select exactly one of the three algorithm codes above.
         */
        const uint8_t csum = (!(((ol_flags ^ PKT_TX_UDP_CKSUM) >> 52) & 0x3) +
                      (!(((ol_flags ^ PKT_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
                      (!(((ol_flags ^ PKT_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));

        const uint8_t is_tunnel_parsed = (!!(ol_flags & PKT_TX_TUNNEL_GTP) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_VXLAN_GPE) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_VXLAN) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_GRE) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_GENEVE) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_IP) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_IPIP));

        /* Outer L4 uses the UDP algorithm (0x1) when requested */
        const uint8_t csum_outer = (!!(ol_flags & PKT_TX_OUTER_UDP_CKSUM) ||
                                    !!(ol_flags & PKT_TX_TUNNEL_UDP));
        const uint8_t outer_l2_len = m->outer_l2_len;
        const uint8_t l2_len = m->l2_len;

        if ((flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
            (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) {
                if (is_tunnel_parsed) {
                        /* Outer L3 */
                        send_hdr->w0.l3ptr = outer_l2_len;
                        send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
                        /* Set ckl3 for PKO to calculate IPV4 header checksum */
                        send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);

                        /* Outer L4 */
                        send_hdr->w0.ckl4 = csum_outer;

                        /* Inner L3 */
                        send_hdr->w1.leptr = send_hdr->w0.l4ptr + l2_len;
                        send_hdr->w1.lfptr = send_hdr->w1.leptr + m->l3_len;
                        /* Set ckle for PKO to calculate inner IPV4 header
                         * checksum.
                         */
                        send_hdr->w0.ckle = !!(ol_flags & PKT_TX_IPV4);

                        /* Inner L4 */
                        send_hdr->w0.cklf = csum;
                } else {
                        /* Inner L3 */
                        send_hdr->w0.l3ptr = l2_len;
                        send_hdr->w0.l4ptr = l2_len + m->l3_len;
                        /* Set ckl3 for PKO to calculate IPV4 header checksum */
                        send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);

                        /* Inner L4 */
                        send_hdr->w0.ckl4 = csum;
                }
        } else if (flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
                /* Outer L3 */
                send_hdr->w0.l3ptr = outer_l2_len;
                send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
                /* Set ckl3 for PKO to calculate IPV4 header checksum */
                send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);

                /* Outer L4 */
                send_hdr->w0.ckl4 = csum_outer;
        } else if (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F) {
                /* Inner L3 */
                send_hdr->w0.l3ptr = l2_len;
                send_hdr->w0.l4ptr = l2_len + m->l3_len;
                /* Set ckl3 for PKO to calculate IPV4 header checksum */
                send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);

                /* Inner L4 */
                send_hdr->w0.ckl4 = csum;
        }
}
 312
/*
 * Build the PKO command-buffer words for a single-segment packet:
 * SEND_HDR_S (two words) followed by SEND_BUFLINK_S (two words).
 * Returns the number of 64-bit descriptor words written to @cmd_buf.
 */
static __rte_always_inline uint16_t
__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
                        const uint16_t flag)
{
        uint16_t gaura_id, nb_desc = 0;

        /* Setup PKO_SEND_HDR_S: w0.total = frame length, w1 = 0 */
        cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
        cmd_buf[nb_desc++] = 0x0;

        /* Enable tx checksum offload */
        if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
            (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
                octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

        /* SEND_HDR[DF] bit controls if buffer is to be freed or
         * not, as SG_DESC[I] and SEND_HDR[II] are clear.
         */
        if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
                cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt) <<
                               58);

        /* Mark mempool object as "put" since it is freed by PKO */
        if (!(cmd_buf[0] & (1ULL << 58)))
                __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
                                        1, 0);
        /* Get the gaura Id (FPA pool the buffer returns to) */
        gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)tx_pkt->pool->pool_id);

        /* Setup PKO_SEND_BUFLINK_S */
        cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
                PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
                PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
                tx_pkt->data_len;
        cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);

        return nb_desc;
}
 351
 352static __rte_always_inline uint16_t
 353__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
 354                        const uint16_t flag)
 355{
 356        uint16_t nb_segs, nb_desc = 0;
 357        uint16_t gaura_id, len = 0;
 358        struct rte_mbuf *m_next = NULL;
 359
 360        nb_segs = tx_pkt->nb_segs;
 361        /* Setup PKO_SEND_HDR_S */
 362        cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
 363        cmd_buf[nb_desc++] = 0x0;
 364
 365        /* Enable tx checksum offload */
 366        if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
 367            (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
 368                octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);
 369
 370        do {
 371                m_next = tx_pkt->next;
 372                /* To handle case where mbufs belong to diff pools, like
 373                 * fragmentation
 374                 */
 375                gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)
 376                                                      tx_pkt->pool->pool_id);
 377
 378                /* Setup PKO_SEND_GATHER_S */
 379                cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC                 |
 380                                   PKO_SEND_GATHER_LDTYPE(0x1ull)        |
 381                                   PKO_SEND_GATHER_GAUAR((long)gaura_id) |
 382                                   tx_pkt->data_len;
 383
 384                /* SG_DESC[I] bit controls if buffer is to be freed or
 385                 * not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
 386                 */
 387                if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
 388                        cmd_buf[nb_desc] |=
 389                             (octeontx_prefree_seg(tx_pkt) << 57);
 390                }
 391
 392                /* Mark mempool object as "put" since it is freed by
 393                 * PKO.
 394                 */
 395                if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
 396                        tx_pkt->next = NULL;
 397                        __mempool_check_cookies(tx_pkt->pool,
 398                                                (void **)&tx_pkt, 1, 0);
 399                }
 400                nb_desc++;
 401
 402                cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
 403
 404                nb_segs--;
 405                len += tx_pkt->data_len;
 406                tx_pkt = m_next;
 407        } while (nb_segs);
 408
 409        return nb_desc;
 410}
 411
 412static __rte_always_inline uint16_t
 413__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 414                     uint16_t nb_pkts, uint64_t *cmd_buf,
 415                     const uint16_t flags)
 416{
 417        struct octeontx_txq *txq = tx_queue;
 418        octeontx_dq_t *dq = &txq->dq;
 419        uint16_t count = 0, nb_desc;
 420        rte_io_wmb();
 421
 422        while (count < nb_pkts) {
 423                if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
 424                        break;
 425
 426                if (flags & OCCTX_TX_MULTI_SEG_F) {
 427                        nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
 428                                                               cmd_buf, flags);
 429                } else {
 430                        nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
 431                                                          cmd_buf, flags);
 432                }
 433
 434                octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
 435                                   nb_desc);
 436
 437                count++;
 438        }
 439        return count;
 440}
 441
/* Rx burst entry point; defined in the PMD's .c file. */
uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

/* Short aliases for the Tx offload flags used in the mode table */
#define L3L4CSUM_F   OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define NOFF_F       OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F       OCCTX_TX_MULTI_SEG_F

/* T(name, [NOFF], [OL3OL4CSUM_F], [L3L4CSUM_F], [MULTI_SEG], sz, flags)
 * NOTE: column order corrected — per the entries below (e.g. T(noff, 1,
 * 0, 0, 0)) the first flag column is NOFF, not L3L4CSUM as the previous
 * comment claimed. The 4/14 column appears to be the per-packet command
 * buffer word count (14 for multi-seg) — confirm against the T() users.
 */
#define OCCTX_TX_FASTPATH_MODES                                                \
T(no_offload,                           0, 0, 0, 0,     4,                     \
                                        OCCTX_TX_OFFLOAD_NONE)                 \
T(mseg,                                 0, 0, 0, 1,     14,                    \
                                        MULT_F)                                \
T(l3l4csum,                             0, 0, 1, 0,     4,                     \
                                        L3L4CSUM_F)                            \
T(l3l4csum_mseg,                        0, 0, 1, 1,     14,                    \
                                        L3L4CSUM_F | MULT_F)                   \
T(ol3ol4csum,                           0, 1, 0, 0,     4,                     \
                                        OL3OL4CSUM_F)                          \
T(ol3l4csum_mseg,                       0, 1, 0, 1,     14,                    \
                                        OL3OL4CSUM_F | MULT_F)                 \
T(ol3l4csum_l3l4csum,                   0, 1, 1, 0,     4,                     \
                                        OL3OL4CSUM_F | L3L4CSUM_F)             \
T(ol3l4csum_l3l4csum_mseg,              0, 1, 1, 1,     14,                    \
                                        OL3OL4CSUM_F | L3L4CSUM_F | MULT_F)    \
T(noff,                                 1, 0, 0, 0,     4,                     \
                                        NOFF_F)                                \
T(noff_mseg,                            1, 0, 0, 1,     14,                    \
                                        NOFF_F | MULT_F)                       \
T(noff_l3l4csum,                        1, 0, 1, 0,     4,                     \
                                        NOFF_F | L3L4CSUM_F)                   \
T(noff_l3l4csum_mseg,                   1, 0, 1, 1,     14,                    \
                                        NOFF_F | L3L4CSUM_F | MULT_F)          \
T(noff_ol3ol4csum,                      1, 1, 0, 0,     4,                     \
                                        NOFF_F | OL3OL4CSUM_F)                 \
T(noff_ol3ol4csum_mseg,                 1, 1, 0, 1,     14,                    \
                                        NOFF_F | OL3OL4CSUM_F | MULT_F)        \
T(noff_ol3ol4csum_l3l4csum,             1, 1, 1, 0,     4,                     \
                                        NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)    \
T(noff_ol3ol4csum_l3l4csum_mseg,        1, 1, 1, 1,     14,                    \
                                        NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F |   \
                                        MULT_F)
 485
/* RX offload macros: short aliases for the Rx flags in the mode table */
#define VLAN_FLTR_F     OCCTX_RX_VLAN_FLTR_F
#define CSUM_F          OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F       OCCTX_RX_MULTI_SEG_F

/* R(name, [VLAN_FLTR], [CSUM_F], [MULTI_SEG], flags) */
#define OCCTX_RX_FASTPATH_MODES                                                \
R(no_offload,                           0, 0, 0,  OCCTX_RX_OFFLOAD_NONE)       \
R(mseg,                                 0, 0, 1,  MULT_RX_F)                   \
R(csum,                                 0, 1, 0,  CSUM_F)                      \
R(csum_mseg,                            0, 1, 1,  CSUM_F | MULT_RX_F)          \
R(vlan,                                 1, 0, 0,  VLAN_FLTR_F)                 \
R(vlan_mseg,                            1, 0, 1,  VLAN_FLTR_F | MULT_RX_F)     \
R(vlan_csum,                            1, 1, 0,  VLAN_FLTR_F | CSUM_F)        \
R(vlan_csum_mseg,                       1, 1, 1,  CSUM_F | VLAN_FLTR_F |       \
                                        MULT_RX_F)
 502
 503 #endif /* __OCTEONTX_RXTX_H__ */
 504