/* dpdk/drivers/net/octeontx/octeontx_rxtx.h */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2017 Cavium, Inc
   3 */
   4
   5#ifndef __OCTEONTX_RXTX_H__
   6#define __OCTEONTX_RXTX_H__
   7
   8#include <ethdev_driver.h>
   9
/* Per-queue fields recording which RX/TX offload fast-path flags
 * (OCCTX_RX_* and OCCTX_TX_* below) were selected at device setup.
 */
#define OFFLOAD_FLAGS                                   \
        uint16_t rx_offload_flags;                      \
        uint16_t tx_offload_flags

/* Single-bit mask helper for the flag definitions below */
#define BIT(nr) (1UL << (nr))

/* RX fast-path offload selection flags */
#define OCCTX_RX_OFFLOAD_NONE           (0)
#define OCCTX_RX_MULTI_SEG_F            BIT(0)  /* scattered (multi-seg) RX */
#define OCCTX_RX_OFFLOAD_CSUM_F         BIT(1)  /* RX checksum handling */
#define OCCTX_RX_VLAN_FLTR_F            BIT(2)  /* VLAN filter mode */

/* TX fast-path offload selection flags */
#define OCCTX_TX_OFFLOAD_NONE           (0)
#define OCCTX_TX_MULTI_SEG_F            BIT(0)  /* multi-segment TX */
#define OCCTX_TX_OFFLOAD_L3_L4_CSUM_F   BIT(1)  /* inner L3/L4 checksum */
#define OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(2)  /* outer L3/L4 checksum */
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)  /* honor mbuf refcnt before HW free */

/* Packet type table: one axis per PKI layer-type code (LC/LE/LF) */
#define PTYPE_SIZE      OCCTX_PKI_LTYPE_LAST
  29
/* octeontx send header sub descriptor structure.
 *
 * Word 0 of the PKO send header.  Written through the named bitfields by
 * octeontx_tx_checksum_offload() and as a raw 64-bit word by the xmit
 * prepare routines (total length, and the DF bit via cmd_buf[0] |= 1<<58).
 * NOTE(review): bit positions must match the hardware layout exactly —
 * never reorder or resize these fields.
 */
RTE_STD_C11
union octeontx_send_hdr_w0_u {
        uint64_t u;                     /* raw 64-bit view of word 0 */
        struct {
                uint64_t total   : 16;  /* total packet length in bytes */
                uint64_t markptr : 8;
                uint64_t l3ptr   : 8;   /* byte offset of (outer) L3 header */
                uint64_t l4ptr   : 8;   /* byte offset of (outer) L4 header */
                uint64_t ii      : 1;   /* "ignore I" free-control bit */
                uint64_t shp_dis : 1;
                uint64_t ckle    : 1;   /* compute inner IPv4 header csum */
                uint64_t cklf    : 2;   /* inner L4 csum algorithm (0-3) */
                uint64_t ckl3    : 1;   /* compute (outer) IPv4 header csum */
                uint64_t ckl4    : 2;   /* (outer) L4 csum algorithm (0-3) */
                uint64_t p       : 1;
                uint64_t format  : 7;
                uint64_t tstamp  : 1;
                uint64_t tso_eom : 1;
                uint64_t df      : 1;   /* "don't free" buffer control bit */
                uint64_t tso     : 1;
                uint64_t n2      : 1;
                uint64_t scntn1  : 3;
        };
};
  55
/* Word 1 of the PKO send header: TSO parameters plus inner-layer offsets.
 * leptr/lfptr receive the inner L3/L4 byte offsets in
 * octeontx_tx_checksum_offload() for the tunnel case.
 * NOTE(review): layout is hardware-defined; never reorder these fields.
 */
RTE_STD_C11
union octeontx_send_hdr_w1_u {
        uint64_t u;                     /* raw 64-bit view of word 1 */
        struct {
                uint64_t tso_mss : 14;
                uint64_t shp_ra  : 2;
                uint64_t tso_sb  : 8;
                uint64_t leptr   : 8;   /* byte offset of inner L3 header */
                uint64_t lfptr   : 8;   /* byte offset of inner L4 header */
                uint64_t shp_chg : 9;
                uint64_t tso_fn  : 7;
                uint64_t l2len   : 8;
        };
};
  70
/* Structured view of the first two 64-bit words of a PKO command buffer
 * (cmd_buf[0]/cmd_buf[1]); cast over cmd_buf by
 * octeontx_tx_checksum_offload().
 */
struct octeontx_send_hdr_s {
        union octeontx_send_hdr_w0_u w0;
        union octeontx_send_hdr_w1_u w1;
};
  75
  76static const uint32_t __rte_cache_aligned
  77ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
  78        [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
  79        [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
  80        [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
  81        [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
  82        [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
  83        [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
  84        [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
  85        [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
  86        [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
  87        [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
  88
  89        [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
  90        [LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
  91                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
  92        [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
  93        [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
  94        [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
  95        [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
  96        [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
  97        [LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
  98                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
  99        [LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
 100                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
 101        [LC_IPV4][LE_NONE][LF_NVGRE] =
 102                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
 103
 104        [LC_IPV4_OPT][LE_NONE][LF_NONE] =
 105                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
 106        [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
 107                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
 108        [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
 109                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
 110        [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
 111                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
 112        [LC_IPV4_OPT][LE_NONE][LF_TCP] =
 113                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
 114        [LC_IPV4_OPT][LE_NONE][LF_UDP] =
 115                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
 116        [LC_IPV4_OPT][LE_NONE][LF_GRE] =
 117                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
 118        [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
 119                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
 120        [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
 121                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
 122        [LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
 123                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
 124
 125        [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
 126        [LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
 127                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
 128        [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
 129        [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
 130        [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
 131        [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
 132        [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
 133        [LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
 134                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
 135        [LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
 136                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
 137        [LC_IPV6][LE_NONE][LF_NVGRE] =
 138                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
 139        [LC_IPV6_OPT][LE_NONE][LF_NONE] =
 140                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
 141        [LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
 142                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
 143        [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
 144                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
 145        [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
 146                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
 147        [LC_IPV6_OPT][LE_NONE][LF_TCP] =
 148                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
 149        [LC_IPV6_OPT][LE_NONE][LF_UDP] =
 150                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
 151        [LC_IPV6_OPT][LE_NONE][LF_GRE] =
 152                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
 153        [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
 154                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
 155        [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
 156                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
 157        [LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
 158                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
 159
 160};
 161
 162
 163static __rte_always_inline uint64_t
 164octeontx_pktmbuf_detach(struct rte_mbuf *m, struct rte_mbuf **m_tofree)
 165{
 166        struct rte_mempool *mp = m->pool;
 167        uint32_t mbuf_size, buf_len;
 168        struct rte_mbuf *md;
 169        uint16_t priv_size;
 170        uint16_t refcount;
 171
 172        /* Update refcount of direct mbuf */
 173        md = rte_mbuf_from_indirect(m);
 174        /* The real data will be in the direct buffer, inform callers this */
 175        *m_tofree = md;
 176        refcount = rte_mbuf_refcnt_update(md, -1);
 177
 178        priv_size = rte_pktmbuf_priv_size(mp);
 179        mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
 180        buf_len = rte_pktmbuf_data_room_size(mp);
 181
 182        m->priv_size = priv_size;
 183        m->buf_addr = (char *)m + mbuf_size;
 184        m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
 185        m->buf_len = (uint16_t)buf_len;
 186        rte_pktmbuf_reset_headroom(m);
 187        m->data_len = 0;
 188        m->ol_flags = 0;
 189        m->next = NULL;
 190        m->nb_segs = 1;
 191
 192        /* Now indirect mbuf is safe to free */
 193        rte_pktmbuf_free(m);
 194
 195        if (refcount == 0) {
 196                rte_mbuf_refcnt_set(md, 1);
 197                md->data_len = 0;
 198                md->ol_flags = 0;
 199                md->next = NULL;
 200                md->nb_segs = 1;
 201                return 0;
 202        } else {
 203                return 1;
 204        }
 205}
 206
 207static __rte_always_inline uint64_t
 208octeontx_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **m_tofree)
 209{
 210        if (likely(rte_mbuf_refcnt_read(m) == 1)) {
 211                if (!RTE_MBUF_DIRECT(m))
 212                        return octeontx_pktmbuf_detach(m, m_tofree);
 213
 214                m->next = NULL;
 215                m->nb_segs = 1;
 216                return 0;
 217        } else if (rte_mbuf_refcnt_update(m, -1) == 0) {
 218                if (!RTE_MBUF_DIRECT(m))
 219                        return octeontx_pktmbuf_detach(m, m_tofree);
 220
 221                rte_mbuf_refcnt_set(m, 1);
 222                m->next = NULL;
 223                m->nb_segs = 1;
 224                return 0;
 225        }
 226
 227        /* Mbuf is having refcount more than 1 so need not to be freed */
 228        return 1;
 229}
 230
/* Fill the checksum-offload fields of the PKO send header (the first two
 * words of cmd_buf) from the mbuf's TX offload flags.  'flags' is a
 * compile-time constant selecting which outer/inner checksum paths are
 * emitted by the fast-path template.
 */
static __rte_always_inline void
octeontx_tx_checksum_offload(uint64_t *cmd_buf, const uint16_t flags,
                             struct rte_mbuf *m)
{
        struct octeontx_send_hdr_s *send_hdr =
                                (struct octeontx_send_hdr_s *)cmd_buf;
        uint64_t ol_flags = m->ol_flags;

        /* PKO Checksum L4 Algorithm Enumeration
         * 0x0 - No checksum
         * 0x1 - UDP L4 checksum
         * 0x2 - TCP L4 checksum
         * 0x3 - SCTP L4 checksum
         *
         * Each term XORs ol_flags with one request constant and tests the
         * 2-bit L4-checksum field (bits 52-53); '!' yields 1 only on an
         * exact match, so csum resolves to the matching algorithm code.
         */
        const uint8_t csum = (!(((ol_flags ^ RTE_MBUF_F_TX_UDP_CKSUM) >> 52) & 0x3) +
                      (!(((ol_flags ^ RTE_MBUF_F_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
                      (!(((ol_flags ^ RTE_MBUF_F_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));

        /* Tunnel encapsulations for which inner-layer offsets are valid */
        const uint8_t is_tunnel_parsed = (!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GTP) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GRE) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GENEVE) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IP) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IPIP));

        /* 0x1 (UDP algorithm) when an outer UDP checksum is requested */
        const uint8_t csum_outer = (!!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) ||
                                    !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_UDP));
        const uint8_t outer_l2_len = m->outer_l2_len;
        const uint8_t l2_len = m->l2_len;

        if ((flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
            (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) {
                if (is_tunnel_parsed) {
                        /* Outer L3 */
                        send_hdr->w0.l3ptr = outer_l2_len;
                        send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
                        /* Set ckl3 for PKO to calculate IPV4 header checksum */
                        send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);

                        /* Outer L4 */
                        send_hdr->w0.ckl4 = csum_outer;

                        /* Inner L3: leptr/lfptr carry the inner offsets */
                        send_hdr->w1.leptr = send_hdr->w0.l4ptr + l2_len;
                        send_hdr->w1.lfptr = send_hdr->w1.leptr + m->l3_len;
                        /* Set ckle for PKO to calculate inner IPV4 header
                         * checksum.
                         */
                        send_hdr->w0.ckle = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

                        /* Inner L4 */
                        send_hdr->w0.cklf = csum;
                } else {
                        /* No tunnel: only one header pair to cover */
                        /* Inner L3 */
                        send_hdr->w0.l3ptr = l2_len;
                        send_hdr->w0.l4ptr = l2_len + m->l3_len;
                        /* Set ckl3 for PKO to calculate IPV4 header checksum */
                        send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

                        /* Inner L4 */
                        send_hdr->w0.ckl4 = csum;
                }
        } else if (flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
                /* Outer L3 */
                send_hdr->w0.l3ptr = outer_l2_len;
                send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
                /* Set ckl3 for PKO to calculate IPV4 header checksum */
                send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);

                /* Outer L4 */
                send_hdr->w0.ckl4 = csum_outer;
        } else if (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F) {
                /* Inner L3 */
                send_hdr->w0.l3ptr = l2_len;
                send_hdr->w0.l4ptr = l2_len + m->l3_len;
                /* Set ckl3 for PKO to calculate IPV4 header checksum */
                send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

                /* Inner L4 */
                send_hdr->w0.ckl4 = csum;
        }
}
 314
 315static __rte_always_inline uint16_t
 316__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
 317                        const uint16_t flag)
 318{
 319        uint16_t gaura_id, nb_desc = 0;
 320        struct rte_mbuf *m_tofree;
 321        rte_iova_t iova;
 322        uint16_t data_len;
 323
 324        m_tofree = tx_pkt;
 325
 326        data_len = tx_pkt->data_len;
 327        iova = rte_mbuf_data_iova(tx_pkt);
 328
 329        /* Setup PKO_SEND_HDR_S */
 330        cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
 331        cmd_buf[nb_desc++] = 0x0;
 332
 333        /* Enable tx checksum offload */
 334        if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
 335            (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
 336                octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);
 337
 338        /* SEND_HDR[DF] bit controls if buffer is to be freed or
 339         * not, as SG_DESC[I] and SEND_HDR[II] are clear.
 340         */
 341        if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
 342                cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt, &m_tofree) <<
 343                               58);
 344
 345        /* Mark mempool object as "put" since it is freed by PKO */
 346        if (!(cmd_buf[0] & (1ULL << 58)))
 347                RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool, (void **)&m_tofree,
 348                                        1, 0);
 349        /* Get the gaura Id */
 350        gaura_id =
 351                octeontx_fpa_bufpool_gaura((uintptr_t)m_tofree->pool->pool_id);
 352
 353        /* Setup PKO_SEND_BUFLINK_S */
 354        cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
 355                PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
 356                PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
 357                data_len;
 358        cmd_buf[nb_desc++] = iova;
 359
 360        return nb_desc;
 361}
 362
/* Build a multi-segment PKO command in cmd_buf: SEND_HDR (2 words)
 * followed by one SEND_GATHER descriptor pair (control word + buffer
 * iova) per segment.  Returns the number of 64-bit command words written.
 */
static __rte_always_inline uint16_t
__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
                        const uint16_t flag)
{
        uint16_t nb_segs, nb_desc = 0;
        uint16_t gaura_id;
        struct rte_mbuf *m_next = NULL, *m_tofree;
        rte_iova_t iova;
        uint16_t data_len;

        nb_segs = tx_pkt->nb_segs;
        /* Setup PKO_SEND_HDR_S: full pkt_len here, per-segment lengths in
         * the gather descriptors below.
         */
        cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
        cmd_buf[nb_desc++] = 0x0;

        /* Enable tx checksum offload */
        if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
            (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
                octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

        do {
                /* Save the chain link first: octeontx_prefree_seg() may
                 * free or reset this segment.
                 */
                m_next = tx_pkt->next;
                /* Get TX parameters up front, octeontx_prefree_seg might change
                 * them
                 */
                m_tofree = tx_pkt;
                data_len = tx_pkt->data_len;
                iova = rte_mbuf_data_iova(tx_pkt);

                /* Setup PKO_SEND_GATHER_S */
                cmd_buf[nb_desc] = 0;

                /* SG_DESC[I] bit controls if buffer is to be freed or
                 * not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
                 */
                if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
                        cmd_buf[nb_desc] |=
                                (octeontx_prefree_seg(tx_pkt, &m_tofree) << 57);
                }

                /* To handle case where mbufs belong to diff pools, like
                 * fragmentation
                 */
                gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)
                                        m_tofree->pool->pool_id);

                /* Setup PKO_SEND_GATHER_S */
                cmd_buf[nb_desc] |= PKO_SEND_GATHER_SUBDC                |
                                   PKO_SEND_GATHER_LDTYPE(0x1ull)        |
                                   PKO_SEND_GATHER_GAUAR((long)gaura_id) |
                                   data_len;

                /* Mark mempool object as "put" since it is freed by
                 * PKO.
                 */
                if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
                        /* Unlink so a later software free of the chain
                         * cannot touch a buffer now owned by PKO.
                         */
                        tx_pkt->next = NULL;
                        RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool,
                                                (void **)&m_tofree, 1, 0);
                }
                nb_desc++;

                cmd_buf[nb_desc++] = iova;

                nb_segs--;
                tx_pkt = m_next;
        } while (nb_segs);

        return nb_desc;
}
 433
 434static __rte_always_inline uint16_t
 435__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 436                     uint16_t nb_pkts, uint64_t *cmd_buf,
 437                     const uint16_t flags)
 438{
 439        struct octeontx_txq *txq = tx_queue;
 440        octeontx_dq_t *dq = &txq->dq;
 441        uint16_t count = 0, nb_desc;
 442        rte_io_wmb();
 443
 444        while (count < nb_pkts) {
 445                if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
 446                        break;
 447
 448                if (flags & OCCTX_TX_MULTI_SEG_F) {
 449                        nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
 450                                                               cmd_buf, flags);
 451                } else {
 452                        nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
 453                                                          cmd_buf, flags);
 454                }
 455
 456                octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
 457                                   nb_desc);
 458
 459                count++;
 460        }
 461        return count;
 462}
 463
/* Burst RX entry point; implemented elsewhere in the driver */
uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 466
/* Short aliases used by the TX fast-path mode table below */
#define L3L4CSUM_F   OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define NOFF_F       OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F       OCCTX_TX_MULTI_SEG_F

/* X-macro table of TX fast-path variants, one T() row per flag
 * combination.  Columns: name, [NOFF] [OL3OL4CSUM_F] [L3L4CSUM_F]
 * [MULTI_SEG] selector bits, a per-packet sizing constant (4 or 14 —
 * presumably max command words per packet; confirm against the T()
 * expansion sites), and the combined flag mask.
 * NOTE(review): the previous column comment listed the selector bits in
 * the wrong order; the order above matches the rows (e.g. T(noff, 1,0,0,0)).
 */
#define OCCTX_TX_FASTPATH_MODES                                                \
T(no_offload,                           0, 0, 0, 0,     4,                     \
                                        OCCTX_TX_OFFLOAD_NONE)                 \
T(mseg,                                 0, 0, 0, 1,     14,                    \
                                        MULT_F)                                \
T(l3l4csum,                             0, 0, 1, 0,     4,                     \
                                        L3L4CSUM_F)                            \
T(l3l4csum_mseg,                        0, 0, 1, 1,     14,                    \
                                        L3L4CSUM_F | MULT_F)                   \
T(ol3ol4csum,                           0, 1, 0, 0,     4,                     \
                                        OL3OL4CSUM_F)                          \
T(ol3l4csum_mseg,                       0, 1, 0, 1,     14,                    \
                                        OL3OL4CSUM_F | MULT_F)                 \
T(ol3l4csum_l3l4csum,                   0, 1, 1, 0,     4,                     \
                                        OL3OL4CSUM_F | L3L4CSUM_F)             \
T(ol3l4csum_l3l4csum_mseg,              0, 1, 1, 1,     14,                    \
                                        OL3OL4CSUM_F | L3L4CSUM_F | MULT_F)    \
T(noff,                                 1, 0, 0, 0,     4,                     \
                                        NOFF_F)                                \
T(noff_mseg,                            1, 0, 0, 1,     14,                    \
                                        NOFF_F | MULT_F)                       \
T(noff_l3l4csum,                        1, 0, 1, 0,     4,                     \
                                        NOFF_F | L3L4CSUM_F)                   \
T(noff_l3l4csum_mseg,                   1, 0, 1, 1,     14,                    \
                                        NOFF_F | L3L4CSUM_F | MULT_F)          \
T(noff_ol3ol4csum,                      1, 1, 0, 0,     4,                     \
                                        NOFF_F | OL3OL4CSUM_F)                 \
T(noff_ol3ol4csum_mseg,                 1, 1, 0, 1,     14,                    \
                                        NOFF_F | OL3OL4CSUM_F | MULT_F)        \
T(noff_ol3ol4csum_l3l4csum,             1, 1, 1, 0,     4,                     \
                                        NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)    \
T(noff_ol3ol4csum_l3l4csum_mseg,        1, 1, 1, 1,     14,                    \
                                        NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F |   \
                                        MULT_F)
 507
/* RX offload macros: short aliases for the RX fast-path mode table */
#define VLAN_FLTR_F     OCCTX_RX_VLAN_FLTR_F
#define CSUM_F          OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F       OCCTX_RX_MULTI_SEG_F

/* X-macro table of RX fast-path variants, one R() row per flag
 * combination.  Columns: name, [VLAN_FLTR] [CSUM_F] [MULTI_SEG]
 * selector bits, and the combined flag mask.
 */
#define OCCTX_RX_FASTPATH_MODES                                                \
R(no_offload,                           0, 0, 0,  OCCTX_RX_OFFLOAD_NONE)       \
R(mseg,                                 0, 0, 1,  MULT_RX_F)                   \
R(csum,                                 0, 1, 0,  CSUM_F)                      \
R(csum_mseg,                            0, 1, 1,  CSUM_F | MULT_RX_F)          \
R(vlan,                                 1, 0, 0,  VLAN_FLTR_F)                 \
R(vlan_mseg,                            1, 0, 1,  VLAN_FLTR_F | MULT_RX_F)     \
R(vlan_csum,                            1, 1, 0,  VLAN_FLTR_F | CSUM_F)        \
R(vlan_csum_mseg,                       1, 1, 1,  CSUM_F | VLAN_FLTR_F |       \
                                        MULT_RX_F)
 524
#endif /* __OCTEONTX_RXTX_H__ */
 526