dpdk/drivers/net/txgbe/txgbe_rxtx.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
   3 * Copyright(c) 2010-2017 Intel Corporation
   4 */
   5
   6#include <sys/queue.h>
   7
   8#include <stdio.h>
   9#include <stdlib.h>
  10#include <string.h>
  11#include <errno.h>
  12#include <stdint.h>
  13#include <stdarg.h>
  14#include <unistd.h>
  15#include <inttypes.h>
  16
  17#include <rte_byteorder.h>
  18#include <rte_common.h>
  19#include <rte_cycles.h>
  20#include <rte_log.h>
  21#include <rte_debug.h>
  22#include <rte_ethdev.h>
  23#include <ethdev_driver.h>
  24#include <rte_security_driver.h>
  25#include <rte_memzone.h>
  26#include <rte_atomic.h>
  27#include <rte_mempool.h>
  28#include <rte_malloc.h>
  29#include <rte_mbuf.h>
  30#include <rte_ether.h>
  31#include <rte_prefetch.h>
  32#include <rte_udp.h>
  33#include <rte_tcp.h>
  34#include <rte_sctp.h>
  35#include <rte_string_fns.h>
  36#include <rte_errno.h>
  37#include <rte_ip.h>
  38#include <rte_net.h>
  39
  40#include "txgbe_logs.h"
  41#include "base/txgbe.h"
  42#include "txgbe_ethdev.h"
  43#include "txgbe_rxtx.h"
  44
  45#ifdef RTE_LIBRTE_IEEE1588
  46#define TXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
  47#else
  48#define TXGBE_TX_IEEE1588_TMST 0
  49#endif
  50
   51/* Bit mask to indicate which bits are required for building the TX context */
  52static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
  53                PKT_TX_OUTER_IPV6 |
  54                PKT_TX_OUTER_IPV4 |
  55                PKT_TX_IPV6 |
  56                PKT_TX_IPV4 |
  57                PKT_TX_VLAN_PKT |
  58                PKT_TX_L4_MASK |
  59                PKT_TX_TCP_SEG |
  60                PKT_TX_TUNNEL_MASK |
  61                PKT_TX_OUTER_IP_CKSUM |
  62                PKT_TX_OUTER_UDP_CKSUM |
  63#ifdef RTE_LIB_SECURITY
  64                PKT_TX_SEC_OFFLOAD |
  65#endif
  66                TXGBE_TX_IEEE1588_TMST);
  67
  68#define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
  69                (PKT_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK)
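/*
 * Any ol_flags bit outside TXGBE_TX_OFFLOAD_MASK is unsupported on this
 * hardware; txgbe_prep_pkts() rejects packets carrying such flags.
 */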
  70
  71/*
  72 * Prefetch a cache line into all cache levels.
  73 */
  74#define rte_txgbe_prefetch(p)   rte_prefetch0(p)
  75
  76static int
  77txgbe_is_vf(struct rte_eth_dev *dev)
  78{
  79        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
  80
  81        switch (hw->mac.type) {
  82        case txgbe_mac_raptor_vf:
  83                return 1;
  84        default:
  85                return 0;
  86        }
  87}
  88
  89/*********************************************************************
  90 *
  91 *  TX functions
  92 *
  93 **********************************************************************/
  94
  95/*
  96 * Check for descriptors with their DD bit set and free mbufs.
  97 * Return the total number of buffers freed.
  98 */
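/*
 * Freed mbufs are collected into a local array and returned to their mempool
 * with rte_mempool_put_bulk(); the batch is flushed early whenever it fills
 * up or the next mbuf belongs to a different mempool.
 */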
  99static __rte_always_inline int
 100txgbe_tx_free_bufs(struct txgbe_tx_queue *txq)
 101{
 102        struct txgbe_tx_entry *txep;
 103        uint32_t status;
 104        int i, nb_free = 0;
 105        struct rte_mbuf *m, *free[RTE_TXGBE_TX_MAX_FREE_BUF_SZ];
 106
 107        /* check DD bit on threshold descriptor */
 108        status = txq->tx_ring[txq->tx_next_dd].dw3;
 109        if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
 110                if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
 111                        txgbe_set32_masked(txq->tdc_reg_addr,
 112                                TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
 113                return 0;
 114        }
 115
 116        /*
 117         * first buffer to free from S/W ring is at index
 118         * tx_next_dd - (tx_free_thresh-1)
 119         */
 120        txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
 121        for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
 122                /* free buffers one at a time */
 123                m = rte_pktmbuf_prefree_seg(txep->mbuf);
 124                txep->mbuf = NULL;
 125
 126                if (unlikely(m == NULL))
 127                        continue;
 128
 129                if (nb_free >= RTE_TXGBE_TX_MAX_FREE_BUF_SZ ||
 130                    (nb_free > 0 && m->pool != free[0]->pool)) {
 131                        rte_mempool_put_bulk(free[0]->pool,
 132                                             (void **)free, nb_free);
 133                        nb_free = 0;
 134                }
 135
 136                free[nb_free++] = m;
 137        }
 138
 139        if (nb_free > 0)
 140                rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
 141
 142        /* buffers were freed, update counters */
 143        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
 144        txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
 145        if (txq->tx_next_dd >= txq->nb_tx_desc)
 146                txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
 147
 148        return txq->tx_free_thresh;
 149}
 150
 151/* Populate 4 descriptors with data from 4 mbufs */
 152static inline void
 153tx4(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
 154{
 155        uint64_t buf_dma_addr;
 156        uint32_t pkt_len;
 157        int i;
 158
 159        for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
 160                buf_dma_addr = rte_mbuf_data_iova(*pkts);
 161                pkt_len = (*pkts)->data_len;
 162
 163                /* write data to descriptor */
 164                txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
 165                txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
 166                                        TXGBE_TXD_DATLEN(pkt_len));
 167                txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
 168
 169                rte_prefetch0(&(*pkts)->pool);
 170        }
 171}
 172
 173/* Populate 1 descriptor with data from 1 mbuf */
 174static inline void
 175tx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
 176{
 177        uint64_t buf_dma_addr;
 178        uint32_t pkt_len;
 179
 180        buf_dma_addr = rte_mbuf_data_iova(*pkts);
 181        pkt_len = (*pkts)->data_len;
 182
 183        /* write data to descriptor */
 184        txdp->qw0 = cpu_to_le64(buf_dma_addr);
 185        txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
 186                                TXGBE_TXD_DATLEN(pkt_len));
 187        txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
 188
 189        rte_prefetch0(&(*pkts)->pool);
 190}
 191
 192/*
 193 * Fill H/W descriptor ring with mbuf data.
 194 * Copy mbuf pointers to the S/W ring.
 195 */
 196static inline void
 197txgbe_tx_fill_hw_ring(struct txgbe_tx_queue *txq, struct rte_mbuf **pkts,
 198                      uint16_t nb_pkts)
 199{
 200        volatile struct txgbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
 201        struct txgbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
 202        const int N_PER_LOOP = 4;
 203        const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
 204        int mainpart, leftover;
 205        int i, j;
 206
 207        /*
 208         * Process most of the packets in chunks of N pkts.  Any
 209         * leftover packets will get processed one at a time.
 210         */
 211        mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
 212        leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
 213        for (i = 0; i < mainpart; i += N_PER_LOOP) {
 214                /* Copy N mbuf pointers to the S/W ring */
 215                for (j = 0; j < N_PER_LOOP; ++j)
 216                        (txep + i + j)->mbuf = *(pkts + i + j);
 217                tx4(txdp + i, pkts + i);
 218        }
 219
 220        if (unlikely(leftover > 0)) {
 221                for (i = 0; i < leftover; ++i) {
 222                        (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
 223                        tx1(txdp + mainpart + i, pkts + mainpart + i);
 224                }
 225        }
 226}
 227
 228static inline uint16_t
 229tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 230             uint16_t nb_pkts)
 231{
 232        struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
 233        uint16_t n = 0;
 234
 235        /*
 236         * Begin scanning the H/W ring for done descriptors when the
 237         * number of available descriptors drops below tx_free_thresh.  For
 238         * each done descriptor, free the associated buffer.
 239         */
 240        if (txq->nb_tx_free < txq->tx_free_thresh)
 241                txgbe_tx_free_bufs(txq);
 242
 243        /* Only use descriptors that are available */
 244        nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 245        if (unlikely(nb_pkts == 0))
 246                return 0;
 247
 248        /* Use exactly nb_pkts descriptors */
 249        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 250
 251        /*
 252         * At this point, we know there are enough descriptors in the
 253         * ring to transmit all the packets.  This assumes that each
 254         * mbuf contains a single segment, and that no new offloads
 255         * are expected, which would require a new context descriptor.
 256         */
 257
 258        /*
 259         * See if we're going to wrap-around. If so, handle the top
 260         * of the descriptor ring first, then do the bottom.  If not,
 261         * the processing looks just like the "bottom" part anyway...
 262         */
 263        if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
 264                n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
 265                txgbe_tx_fill_hw_ring(txq, tx_pkts, n);
 266                txq->tx_tail = 0;
 267        }
 268
 269        /* Fill H/W descriptor ring with mbuf data */
 270        txgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
 271        txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
 272
 273        /*
 274         * Check for wrap-around. This would only happen if we used
 275         * up to the last descriptor in the ring, no more, no less.
 276         */
 277        if (txq->tx_tail >= txq->nb_tx_desc)
 278                txq->tx_tail = 0;
 279
 280        PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
 281                   (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
 282                   (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
 283
 284        /* update tail pointer */
 285        rte_wmb();
 286        txgbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
 287
 288        return nb_pkts;
 289}
 290
 291uint16_t
 292txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 293                       uint16_t nb_pkts)
 294{
 295        uint16_t nb_tx;
 296
  297        /* Transmit the burst directly if it fits within TX_MAX_BURST pkts */
 298        if (likely(nb_pkts <= RTE_PMD_TXGBE_TX_MAX_BURST))
 299                return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
 300
 301        /* transmit more than the max burst, in chunks of TX_MAX_BURST */
 302        nb_tx = 0;
 303        while (nb_pkts) {
 304                uint16_t ret, n;
 305
 306                n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_TX_MAX_BURST);
 307                ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
 308                nb_tx = (uint16_t)(nb_tx + ret);
 309                nb_pkts = (uint16_t)(nb_pkts - ret);
 310                if (ret < n)
 311                        break;
 312        }
 313
 314        return nb_tx;
 315}
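/*
 * Bursts larger than RTE_PMD_TXGBE_TX_MAX_BURST are split into chunks; if a
 * chunk cannot be sent completely (the ring ran out of free descriptors),
 * the remaining packets are left to the caller.
 */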
 316
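/*
 * Fill an advanced context descriptor with the per-packet offload parameters
 * (MAC/IP lengths, VLAN tag, tunnel lengths, MSS/L4LEN and, if enabled,
 * IPsec metadata) and cache them in txq->ctx_cache[] so that
 * what_ctx_update() can reuse the slot for subsequent packets.
 */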
 317static inline void
 318txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
 319                volatile struct txgbe_tx_ctx_desc *ctx_txd,
 320                uint64_t ol_flags, union txgbe_tx_offload tx_offload,
 321                __rte_unused uint64_t *mdata)
 322{
 323        union txgbe_tx_offload tx_offload_mask;
 324        uint32_t type_tucmd_mlhl;
 325        uint32_t mss_l4len_idx;
 326        uint32_t ctx_idx;
 327        uint32_t vlan_macip_lens;
 328        uint32_t tunnel_seed;
 329
 330        ctx_idx = txq->ctx_curr;
 331        tx_offload_mask.data[0] = 0;
 332        tx_offload_mask.data[1] = 0;
 333
 334        /* Specify which HW CTX to upload. */
 335        mss_l4len_idx = TXGBE_TXD_IDX(ctx_idx);
 336        type_tucmd_mlhl = TXGBE_TXD_CTXT;
 337
 338        tx_offload_mask.ptid |= ~0;
 339        type_tucmd_mlhl |= TXGBE_TXD_PTID(tx_offload.ptid);
 340
 341        /* check if TCP segmentation required for this packet */
 342        if (ol_flags & PKT_TX_TCP_SEG) {
 343                tx_offload_mask.l2_len |= ~0;
 344                tx_offload_mask.l3_len |= ~0;
 345                tx_offload_mask.l4_len |= ~0;
 346                tx_offload_mask.tso_segsz |= ~0;
 347                mss_l4len_idx |= TXGBE_TXD_MSS(tx_offload.tso_segsz);
 348                mss_l4len_idx |= TXGBE_TXD_L4LEN(tx_offload.l4_len);
 349        } else { /* no TSO, check if hardware checksum is needed */
 350                if (ol_flags & PKT_TX_IP_CKSUM) {
 351                        tx_offload_mask.l2_len |= ~0;
 352                        tx_offload_mask.l3_len |= ~0;
 353                }
 354
 355                switch (ol_flags & PKT_TX_L4_MASK) {
 356                case PKT_TX_UDP_CKSUM:
 357                        mss_l4len_idx |=
 358                                TXGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
 359                        tx_offload_mask.l2_len |= ~0;
 360                        tx_offload_mask.l3_len |= ~0;
 361                        break;
 362                case PKT_TX_TCP_CKSUM:
 363                        mss_l4len_idx |=
 364                                TXGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
 365                        tx_offload_mask.l2_len |= ~0;
 366                        tx_offload_mask.l3_len |= ~0;
 367                        break;
 368                case PKT_TX_SCTP_CKSUM:
 369                        mss_l4len_idx |=
 370                                TXGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
 371                        tx_offload_mask.l2_len |= ~0;
 372                        tx_offload_mask.l3_len |= ~0;
 373                        break;
 374                default:
 375                        break;
 376                }
 377        }
 378
 379        vlan_macip_lens = TXGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
 380
 381        if (ol_flags & PKT_TX_TUNNEL_MASK) {
 382                tx_offload_mask.outer_tun_len |= ~0;
 383                tx_offload_mask.outer_l2_len |= ~0;
 384                tx_offload_mask.outer_l3_len |= ~0;
 385                tx_offload_mask.l2_len |= ~0;
 386                tunnel_seed = TXGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
 387                tunnel_seed |= TXGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
 388
 389                switch (ol_flags & PKT_TX_TUNNEL_MASK) {
 390                case PKT_TX_TUNNEL_IPIP:
 391                        /* for non UDP / GRE tunneling, set to 0b */
 392                        break;
 393                case PKT_TX_TUNNEL_VXLAN:
 394                case PKT_TX_TUNNEL_VXLAN_GPE:
 395                case PKT_TX_TUNNEL_GENEVE:
 396                        tunnel_seed |= TXGBE_TXD_ETYPE_UDP;
 397                        break;
 398                case PKT_TX_TUNNEL_GRE:
 399                        tunnel_seed |= TXGBE_TXD_ETYPE_GRE;
 400                        break;
 401                default:
 402                        PMD_TX_LOG(ERR, "Tunnel type not supported");
 403                        return;
 404                }
 405                vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.outer_l2_len);
 406        } else {
 407                tunnel_seed = 0;
 408                vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.l2_len);
 409        }
 410
 411        if (ol_flags & PKT_TX_VLAN_PKT) {
 412                tx_offload_mask.vlan_tci |= ~0;
 413                vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
 414        }
 415
 416#ifdef RTE_LIB_SECURITY
 417        if (ol_flags & PKT_TX_SEC_OFFLOAD) {
 418                union txgbe_crypto_tx_desc_md *md =
 419                                (union txgbe_crypto_tx_desc_md *)mdata;
 420                tunnel_seed |= TXGBE_TXD_IPSEC_SAIDX(md->sa_idx);
 421                type_tucmd_mlhl |= md->enc ?
 422                        (TXGBE_TXD_IPSEC_ESP | TXGBE_TXD_IPSEC_ESPENC) : 0;
 423                type_tucmd_mlhl |= TXGBE_TXD_IPSEC_ESPLEN(md->pad_len);
 424                tx_offload_mask.sa_idx |= ~0;
 425                tx_offload_mask.sec_pad_len |= ~0;
 426        }
 427#endif
 428
 429        txq->ctx_cache[ctx_idx].flags = ol_flags;
 430        txq->ctx_cache[ctx_idx].tx_offload.data[0] =
 431                tx_offload_mask.data[0] & tx_offload.data[0];
 432        txq->ctx_cache[ctx_idx].tx_offload.data[1] =
 433                tx_offload_mask.data[1] & tx_offload.data[1];
 434        txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
 435
 436        ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
 437        ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
 438        ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
 439        ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
 440}
 441
 442/*
 443 * Check which hardware context can be used. Use the existing match
 444 * or create a new context descriptor.
 445 */
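/*
 * The queue caches two context slots; ctx_curr toggles between them (^= 1),
 * so two alternating offload configurations can be serviced without writing
 * a new context descriptor for every packet. TXGBE_CTX_NUM is returned when
 * neither cached slot matches.
 */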
 446static inline uint32_t
 447what_ctx_update(struct txgbe_tx_queue *txq, uint64_t flags,
 448                   union txgbe_tx_offload tx_offload)
 449{
  450        /* Check whether it matches the currently used context */
 451        if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
 452                   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
 453                    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
 454                     & tx_offload.data[0])) &&
 455                   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
 456                    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
 457                     & tx_offload.data[1]))))
 458                return txq->ctx_curr;
 459
  460        /* Otherwise, check whether the other cached context matches */
 461        txq->ctx_curr ^= 1;
 462        if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
 463                   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
 464                    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
 465                     & tx_offload.data[0])) &&
 466                   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
 467                    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
 468                     & tx_offload.data[1]))))
 469                return txq->ctx_curr;
 470
  471        /* Neither cached context matches: a new one must be built */
 472        return TXGBE_CTX_NUM;
 473}
 474
 475static inline uint32_t
 476tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
 477{
 478        uint32_t tmp = 0;
 479
 480        if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) {
 481                tmp |= TXGBE_TXD_CC;
 482                tmp |= TXGBE_TXD_L4CS;
 483        }
 484        if (ol_flags & PKT_TX_IP_CKSUM) {
 485                tmp |= TXGBE_TXD_CC;
 486                tmp |= TXGBE_TXD_IPCS;
 487        }
 488        if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
 489                tmp |= TXGBE_TXD_CC;
 490                tmp |= TXGBE_TXD_EIPCS;
 491        }
 492        if (ol_flags & PKT_TX_TCP_SEG) {
 493                tmp |= TXGBE_TXD_CC;
 494                /* implies IPv4 cksum */
 495                if (ol_flags & PKT_TX_IPV4)
 496                        tmp |= TXGBE_TXD_IPCS;
 497                tmp |= TXGBE_TXD_L4CS;
 498        }
 499        if (ol_flags & PKT_TX_VLAN_PKT)
 500                tmp |= TXGBE_TXD_CC;
 501
 502        return tmp;
 503}
 504
 505static inline uint32_t
 506tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 507{
 508        uint32_t cmdtype = 0;
 509
 510        if (ol_flags & PKT_TX_VLAN_PKT)
 511                cmdtype |= TXGBE_TXD_VLE;
 512        if (ol_flags & PKT_TX_TCP_SEG)
 513                cmdtype |= TXGBE_TXD_TSE;
 514        if (ol_flags & PKT_TX_MACSEC)
 515                cmdtype |= TXGBE_TXD_LINKSEC;
 516        return cmdtype;
 517}
 518
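/*
 * Derive an RTE_PTYPE_* value from the Tx offload flags when the application
 * did not provide mbuf->packet_type, then encode it into the hardware
 * packet-type id (ptid) with txgbe_encode_ptype().
 */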
 519static inline uint8_t
 520tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
 521{
 522        bool tun;
 523
 524        if (ptype)
 525                return txgbe_encode_ptype(ptype);
 526
 527        /* Only support flags in TXGBE_TX_OFFLOAD_MASK */
 528        tun = !!(oflags & PKT_TX_TUNNEL_MASK);
 529
 530        /* L2 level */
 531        ptype = RTE_PTYPE_L2_ETHER;
 532        if (oflags & PKT_TX_VLAN)
 533                ptype |= RTE_PTYPE_L2_ETHER_VLAN;
 534
 535        /* L3 level */
 536        if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM))
 537                ptype |= RTE_PTYPE_L3_IPV4;
 538        else if (oflags & (PKT_TX_OUTER_IPV6))
 539                ptype |= RTE_PTYPE_L3_IPV6;
 540
 541        if (oflags & (PKT_TX_IPV4 | PKT_TX_IP_CKSUM))
 542                ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
 543        else if (oflags & (PKT_TX_IPV6))
 544                ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
 545
 546        /* L4 level */
 547        switch (oflags & (PKT_TX_L4_MASK)) {
 548        case PKT_TX_TCP_CKSUM:
 549                ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
 550                break;
 551        case PKT_TX_UDP_CKSUM:
 552                ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
 553                break;
 554        case PKT_TX_SCTP_CKSUM:
 555                ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
 556                break;
 557        }
 558
 559        if (oflags & PKT_TX_TCP_SEG)
 560                ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
 561
 562        /* Tunnel */
 563        switch (oflags & PKT_TX_TUNNEL_MASK) {
 564        case PKT_TX_TUNNEL_VXLAN:
 565                ptype |= RTE_PTYPE_L2_ETHER |
 566                         RTE_PTYPE_L3_IPV4 |
 567                         RTE_PTYPE_TUNNEL_VXLAN;
 568                ptype |= RTE_PTYPE_INNER_L2_ETHER;
 569                break;
 570        case PKT_TX_TUNNEL_GRE:
 571                ptype |= RTE_PTYPE_L2_ETHER |
 572                         RTE_PTYPE_L3_IPV4 |
 573                         RTE_PTYPE_TUNNEL_GRE;
 574                ptype |= RTE_PTYPE_INNER_L2_ETHER;
 575                break;
 576        case PKT_TX_TUNNEL_GENEVE:
 577                ptype |= RTE_PTYPE_L2_ETHER |
 578                         RTE_PTYPE_L3_IPV4 |
 579                         RTE_PTYPE_TUNNEL_GENEVE;
 580                ptype |= RTE_PTYPE_INNER_L2_ETHER;
 581                break;
 582        case PKT_TX_TUNNEL_VXLAN_GPE:
 583                ptype |= RTE_PTYPE_L2_ETHER |
 584                         RTE_PTYPE_L3_IPV4 |
 585                         RTE_PTYPE_TUNNEL_VXLAN_GPE;
 586                break;
 587        case PKT_TX_TUNNEL_IPIP:
 588        case PKT_TX_TUNNEL_IP:
 589                ptype |= RTE_PTYPE_L2_ETHER |
 590                         RTE_PTYPE_L3_IPV4 |
 591                         RTE_PTYPE_TUNNEL_IP;
 592                break;
 593        }
 594
 595        return txgbe_encode_ptype(ptype);
 596}
 597
 598#ifndef DEFAULT_TX_FREE_THRESH
 599#define DEFAULT_TX_FREE_THRESH 32
 600#endif
 601
 602/* Reset transmit descriptors after they have been used */
 603static inline int
 604txgbe_xmit_cleanup(struct txgbe_tx_queue *txq)
 605{
 606        struct txgbe_tx_entry *sw_ring = txq->sw_ring;
 607        volatile struct txgbe_tx_desc *txr = txq->tx_ring;
 608        uint16_t last_desc_cleaned = txq->last_desc_cleaned;
 609        uint16_t nb_tx_desc = txq->nb_tx_desc;
 610        uint16_t desc_to_clean_to;
 611        uint16_t nb_tx_to_clean;
 612        uint32_t status;
 613
 614        /* Determine the last descriptor needing to be cleaned */
 615        desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
 616        if (desc_to_clean_to >= nb_tx_desc)
 617                desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 618
 619        /* Check to make sure the last descriptor to clean is done */
 620        desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
 621        status = txr[desc_to_clean_to].dw3;
 622        if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
 623                PMD_TX_FREE_LOG(DEBUG,
  624                                "TX descriptor %4u is not done "
 625                                "(port=%d queue=%d)",
 626                                desc_to_clean_to,
 627                                txq->port_id, txq->queue_id);
 628                if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
 629                        txgbe_set32_masked(txq->tdc_reg_addr,
 630                                TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
 631                /* Failed to clean any descriptors, better luck next time */
 632                return -(1);
 633        }
 634
 635        /* Figure out how many descriptors will be cleaned */
 636        if (last_desc_cleaned > desc_to_clean_to)
 637                nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
 638                                                        desc_to_clean_to);
 639        else
 640                nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
 641                                                last_desc_cleaned);
 642
 643        PMD_TX_FREE_LOG(DEBUG,
 644                        "Cleaning %4u TX descriptors: %4u to %4u "
 645                        "(port=%d queue=%d)",
 646                        nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
 647                        txq->port_id, txq->queue_id);
 648
 649        /*
 650         * The last descriptor to clean is done, so that means all the
 651         * descriptors from the last descriptor that was cleaned
 652         * up to the last descriptor with the RS bit set
 653         * are done. Only reset the threshold descriptor.
 654         */
 655        txr[desc_to_clean_to].dw3 = 0;
 656
 657        /* Update the txq to reflect the last descriptor that was cleaned */
 658        txq->last_desc_cleaned = desc_to_clean_to;
 659        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
 660
 661        /* No Error */
 662        return 0;
 663}
 664
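/*
 * Return the length of the outer tunnel header seen by the hardware:
 * UDP + VXLAN, NVGRE, or UDP + GENEVE (whose opt_len field is counted in
 * 4-byte words, hence the << 2). IPIP and unknown tunnel types report 0.
 */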
 665static inline uint8_t
 666txgbe_get_tun_len(struct rte_mbuf *mbuf)
 667{
 668        struct txgbe_genevehdr genevehdr;
 669        const struct txgbe_genevehdr *gh;
 670        uint8_t tun_len;
 671
 672        switch (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) {
 673        case PKT_TX_TUNNEL_IPIP:
 674                tun_len = 0;
 675                break;
 676        case PKT_TX_TUNNEL_VXLAN:
 677        case PKT_TX_TUNNEL_VXLAN_GPE:
 678                tun_len = sizeof(struct txgbe_udphdr)
 679                        + sizeof(struct txgbe_vxlanhdr);
 680                break;
 681        case PKT_TX_TUNNEL_GRE:
 682                tun_len = sizeof(struct txgbe_nvgrehdr);
 683                break;
 684        case PKT_TX_TUNNEL_GENEVE:
 685                gh = rte_pktmbuf_read(mbuf,
 686                        mbuf->outer_l2_len + mbuf->outer_l3_len,
 687                        sizeof(genevehdr), &genevehdr);
 688                tun_len = sizeof(struct txgbe_udphdr)
 689                        + sizeof(struct txgbe_genevehdr)
 690                        + (gh->opt_len << 2);
 691                break;
 692        default:
 693                tun_len = 0;
 694        }
 695
 696        return tun_len;
 697}
 698
 699uint16_t
 700txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 701                uint16_t nb_pkts)
 702{
 703        struct txgbe_tx_queue *txq;
 704        struct txgbe_tx_entry *sw_ring;
 705        struct txgbe_tx_entry *txe, *txn;
 706        volatile struct txgbe_tx_desc *txr;
 707        volatile struct txgbe_tx_desc *txd;
 708        struct rte_mbuf     *tx_pkt;
 709        struct rte_mbuf     *m_seg;
 710        uint64_t buf_dma_addr;
 711        uint32_t olinfo_status;
 712        uint32_t cmd_type_len;
 713        uint32_t pkt_len;
 714        uint16_t slen;
 715        uint64_t ol_flags;
 716        uint16_t tx_id;
 717        uint16_t tx_last;
 718        uint16_t nb_tx;
 719        uint16_t nb_used;
 720        uint64_t tx_ol_req;
 721        uint32_t ctx = 0;
 722        uint32_t new_ctx;
 723        union txgbe_tx_offload tx_offload;
 724#ifdef RTE_LIB_SECURITY
 725        uint8_t use_ipsec;
 726#endif
 727
 728        tx_offload.data[0] = 0;
 729        tx_offload.data[1] = 0;
 730        txq = tx_queue;
 731        sw_ring = txq->sw_ring;
 732        txr     = txq->tx_ring;
 733        tx_id   = txq->tx_tail;
 734        txe = &sw_ring[tx_id];
 735
 736        /* Determine if the descriptor ring needs to be cleaned. */
 737        if (txq->nb_tx_free < txq->tx_free_thresh)
 738                txgbe_xmit_cleanup(txq);
 739
 740        rte_prefetch0(&txe->mbuf->pool);
 741
 742        /* TX loop */
 743        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 744                new_ctx = 0;
 745                tx_pkt = *tx_pkts++;
 746                pkt_len = tx_pkt->pkt_len;
 747
 748                /*
 749                 * Determine how many (if any) context descriptors
 750                 * are needed for offload functionality.
 751                 */
 752                ol_flags = tx_pkt->ol_flags;
 753#ifdef RTE_LIB_SECURITY
 754                use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
 755#endif
 756
 757                /* If hardware offload required */
 758                tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
 759                if (tx_ol_req) {
 760                        tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
 761                                        tx_pkt->packet_type);
 762                        tx_offload.l2_len = tx_pkt->l2_len;
 763                        tx_offload.l3_len = tx_pkt->l3_len;
 764                        tx_offload.l4_len = tx_pkt->l4_len;
 765                        tx_offload.vlan_tci = tx_pkt->vlan_tci;
 766                        tx_offload.tso_segsz = tx_pkt->tso_segsz;
 767                        tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
 768                        tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
 769                        tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
 770
 771#ifdef RTE_LIB_SECURITY
 772                        if (use_ipsec) {
 773                                union txgbe_crypto_tx_desc_md *ipsec_mdata =
 774                                        (union txgbe_crypto_tx_desc_md *)
 775                                                rte_security_dynfield(tx_pkt);
 776                                tx_offload.sa_idx = ipsec_mdata->sa_idx;
 777                                tx_offload.sec_pad_len = ipsec_mdata->pad_len;
 778                        }
 779#endif
 780
  781                        /* Decide whether a new context must be built or an existing one reused */
 782                        ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
 783                        /* Only allocate context descriptor if required */
 784                        new_ctx = (ctx == TXGBE_CTX_NUM);
 785                        ctx = txq->ctx_curr;
 786                }
 787
 788                /*
  789                 * Keep track of how many descriptors are used this loop.
 790                 * This will always be the number of segments + the number of
 791                 * Context descriptors required to transmit the packet
 792                 */
 793                nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
 794
 795                /*
 796                 * The number of descriptors that must be allocated for a
 797                 * packet is the number of segments of that packet, plus 1
 798                 * Context Descriptor for the hardware offload, if any.
 799                 * Determine the last TX descriptor to allocate in the TX ring
 800                 * for the packet, starting from the current position (tx_id)
 801                 * in the ring.
 802                 */
 803                tx_last = (uint16_t)(tx_id + nb_used - 1);
 804
 805                /* Circular ring */
 806                if (tx_last >= txq->nb_tx_desc)
 807                        tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
 808
 809                PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
 810                           " tx_first=%u tx_last=%u",
 811                           (uint16_t)txq->port_id,
 812                           (uint16_t)txq->queue_id,
 813                           (uint32_t)pkt_len,
 814                           (uint16_t)tx_id,
 815                           (uint16_t)tx_last);
 816
 817                /*
 818                 * Make sure there are enough TX descriptors available to
 819                 * transmit the entire packet.
 820                 * nb_used better be less than or equal to txq->tx_free_thresh
 821                 */
 822                if (nb_used > txq->nb_tx_free) {
 823                        PMD_TX_FREE_LOG(DEBUG,
 824                                        "Not enough free TX descriptors "
 825                                        "nb_used=%4u nb_free=%4u "
 826                                        "(port=%d queue=%d)",
 827                                        nb_used, txq->nb_tx_free,
 828                                        txq->port_id, txq->queue_id);
 829
 830                        if (txgbe_xmit_cleanup(txq) != 0) {
 831                                /* Could not clean any descriptors */
 832                                if (nb_tx == 0)
 833                                        return 0;
 834                                goto end_of_tx;
 835                        }
 836
 837                        /* nb_used better be <= txq->tx_free_thresh */
 838                        if (unlikely(nb_used > txq->tx_free_thresh)) {
 839                                PMD_TX_FREE_LOG(DEBUG,
 840                                        "The number of descriptors needed to "
 841                                        "transmit the packet exceeds the "
 842                                        "RS bit threshold. This will impact "
  843                                        "performance. "
 844                                        "nb_used=%4u nb_free=%4u "
 845                                        "tx_free_thresh=%4u. "
 846                                        "(port=%d queue=%d)",
 847                                        nb_used, txq->nb_tx_free,
 848                                        txq->tx_free_thresh,
 849                                        txq->port_id, txq->queue_id);
 850                                /*
 851                                 * Loop here until there are enough TX
 852                                 * descriptors or until the ring cannot be
 853                                 * cleaned.
 854                                 */
 855                                while (nb_used > txq->nb_tx_free) {
 856                                        if (txgbe_xmit_cleanup(txq) != 0) {
 857                                                /*
 858                                                 * Could not clean any
 859                                                 * descriptors
 860                                                 */
 861                                                if (nb_tx == 0)
 862                                                        return 0;
 863                                                goto end_of_tx;
 864                                        }
 865                                }
 866                        }
 867                }
 868
 869                /*
 870                 * By now there are enough free TX descriptors to transmit
 871                 * the packet.
 872                 */
 873
 874                /*
 875                 * Set common flags of all TX Data Descriptors.
 876                 *
 877                 * The following bits must be set in all Data Descriptors:
 878                 *   - TXGBE_TXD_DTYP_DATA
 879                 *   - TXGBE_TXD_DCMD_DEXT
 880                 *
 881                 * The following bits must be set in the first Data Descriptor
 882                 * and are ignored in the other ones:
 883                 *   - TXGBE_TXD_DCMD_IFCS
 884                 *   - TXGBE_TXD_MAC_1588
 885                 *   - TXGBE_TXD_DCMD_VLE
 886                 *
 887                 * The following bits must only be set in the last Data
 888                 * Descriptor:
 889                 *   - TXGBE_TXD_CMD_EOP
 890                 *
 891                 * The following bits can be set in any Data Descriptor, but
 892                 * are only set in the last Data Descriptor:
 893                 *   - TXGBE_TXD_CMD_RS
 894                 */
 895                cmd_type_len = TXGBE_TXD_FCS;
 896
 897#ifdef RTE_LIBRTE_IEEE1588
 898                if (ol_flags & PKT_TX_IEEE1588_TMST)
 899                        cmd_type_len |= TXGBE_TXD_1588;
 900#endif
 901
 902                olinfo_status = 0;
 903                if (tx_ol_req) {
 904                        if (ol_flags & PKT_TX_TCP_SEG) {
  905                                /* when TSO is on, the paylen in the descriptor
  906                                 * is not the packet len but the tcp payload len
 907                                 */
 908                                pkt_len -= (tx_offload.l2_len +
 909                                        tx_offload.l3_len + tx_offload.l4_len);
 910                                pkt_len -=
 911                                        (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK)
 912                                        ? tx_offload.outer_l2_len +
 913                                          tx_offload.outer_l3_len : 0;
 914                        }
 915
 916                        /*
 917                         * Setup the TX Advanced Context Descriptor if required
 918                         */
 919                        if (new_ctx) {
 920                                volatile struct txgbe_tx_ctx_desc *ctx_txd;
 921
 922                                ctx_txd = (volatile struct txgbe_tx_ctx_desc *)
 923                                    &txr[tx_id];
 924
 925                                txn = &sw_ring[txe->next_id];
 926                                rte_prefetch0(&txn->mbuf->pool);
 927
 928                                if (txe->mbuf != NULL) {
 929                                        rte_pktmbuf_free_seg(txe->mbuf);
 930                                        txe->mbuf = NULL;
 931                                }
 932
 933                                txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
 934                                        tx_offload,
 935                                        rte_security_dynfield(tx_pkt));
 936
 937                                txe->last_id = tx_last;
 938                                tx_id = txe->next_id;
 939                                txe = txn;
 940                        }
 941
 942                        /*
  943                         * Set up the TX Advanced Data Descriptor.
  944                         * This path is taken whether a new context
  945                         * descriptor was built or an existing one is reused.
 946                         */
 947                        cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
 948                        olinfo_status |=
 949                                tx_desc_cksum_flags_to_olinfo(ol_flags);
 950                        olinfo_status |= TXGBE_TXD_IDX(ctx);
 951                }
 952
 953                olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
 954#ifdef RTE_LIB_SECURITY
 955                if (use_ipsec)
 956                        olinfo_status |= TXGBE_TXD_IPSEC;
 957#endif
 958
 959                m_seg = tx_pkt;
 960                do {
 961                        txd = &txr[tx_id];
 962                        txn = &sw_ring[txe->next_id];
 963                        rte_prefetch0(&txn->mbuf->pool);
 964
 965                        if (txe->mbuf != NULL)
 966                                rte_pktmbuf_free_seg(txe->mbuf);
 967                        txe->mbuf = m_seg;
 968
 969                        /*
 970                         * Set up Transmit Data Descriptor.
 971                         */
 972                        slen = m_seg->data_len;
 973                        buf_dma_addr = rte_mbuf_data_iova(m_seg);
 974                        txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
 975                        txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
 976                        txd->dw3 = rte_cpu_to_le_32(olinfo_status);
 977                        txe->last_id = tx_last;
 978                        tx_id = txe->next_id;
 979                        txe = txn;
 980                        m_seg = m_seg->next;
 981                } while (m_seg != NULL);
 982
 983                /*
 984                 * The last packet data descriptor needs End Of Packet (EOP)
 985                 */
 986                cmd_type_len |= TXGBE_TXD_EOP;
 987                txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
 988
 989                txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
 990        }
 991
 992end_of_tx:
 993
 994        rte_wmb();
 995
 996        /*
 997         * Set the Transmit Descriptor Tail (TDT)
 998         */
 999        PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
1000                   (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
1001                   (uint16_t)tx_id, (uint16_t)nb_tx);
1002        txgbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
1003        txq->tx_tail = tx_id;
1004
1005        return nb_tx;
1006}
1007
1008/*********************************************************************
1009 *
1010 *  TX prep functions
1011 *
1012 **********************************************************************/
1013uint16_t
1014txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1015{
1016        int i, ret;
1017        uint64_t ol_flags;
1018        struct rte_mbuf *m;
1019        struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
1020
1021        for (i = 0; i < nb_pkts; i++) {
1022                m = tx_pkts[i];
1023                ol_flags = m->ol_flags;
1024
1025                /**
1026                 * Check if packet meets requirements for number of segments
1027                 *
1028                 * NOTE: for txgbe it's always (40 - WTHRESH) for both TSO and
1029                 *       non-TSO
1030                 */
1031
1032                if (m->nb_segs > TXGBE_TX_MAX_SEG - txq->wthresh) {
 1033                        rte_errno = EINVAL;
1034                        return i;
1035                }
1036
1037                if (ol_flags & TXGBE_TX_OFFLOAD_NOTSUP_MASK) {
 1038                        rte_errno = ENOTSUP;
1039                        return i;
1040                }
1041
1042#ifdef RTE_LIBRTE_ETHDEV_DEBUG
1043                ret = rte_validate_tx_offload(m);
1044                if (ret != 0) {
 1045                        rte_errno = -ret;
1046                        return i;
1047                }
1048#endif
1049                ret = rte_net_intel_cksum_prepare(m);
1050                if (ret != 0) {
 1051                        rte_errno = -ret;
1052                        return i;
1053                }
1054        }
1055
1056        return i;
1057}
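/*
 * Hedged usage sketch (illustrative, not part of the driver): applications
 * normally reach this function through the generic ethdev hook, e.g.
 *
 *     nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *     nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * A rejected packet leaves rte_errno set and is reported through the return
 * value, so the caller can drop or repair it before retrying.
 */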
1058
1059/*********************************************************************
1060 *
1061 *  RX functions
1062 *
1063 **********************************************************************/
 1064/* @note: update txgbe_dev_supported_ptypes_get() if anything changes here. */
1065static inline uint32_t
1066txgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
1067{
1068        uint16_t ptid = TXGBE_RXD_PTID(pkt_info);
1069
1070        ptid &= ptid_mask;
1071
1072        return txgbe_decode_ptype(ptid);
1073}
1074
1075static inline uint64_t
1076txgbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
1077{
1078        static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1079                0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1080                0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1081                PKT_RX_RSS_HASH, 0, 0, 0,
1082                0, 0, 0,  PKT_RX_FDIR,
1083        };
1084#ifdef RTE_LIBRTE_IEEE1588
1085        static uint64_t ip_pkt_etqf_map[8] = {
1086                0, 0, 0, PKT_RX_IEEE1588_PTP,
1087                0, 0, 0, 0,
1088        };
1089        int etfid = txgbe_etflt_id(TXGBE_RXD_PTID(pkt_info));
1090        if (likely(-1 != etfid))
1091                return ip_pkt_etqf_map[etfid] |
1092                       ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
1093        else
1094                return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
1095#else
1096        return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
1097#endif
1098}
1099
1100static inline uint64_t
1101rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1102{
1103        uint64_t pkt_flags;
1104
1105        /*
1106         * Check if VLAN present only.
1107         * Do not check whether L3/L4 rx checksum done by NIC or not,
 1108         * Do not check whether the L3/L4 rx checksum was done by the NIC;
 1109         * that can be found from the rte_eth_rxmode.offloads flag.
1110        pkt_flags = (rx_status & TXGBE_RXD_STAT_VLAN &&
1111                     vlan_flags & PKT_RX_VLAN_STRIPPED)
1112                    ? vlan_flags : 0;
1113
1114#ifdef RTE_LIBRTE_IEEE1588
1115        if (rx_status & TXGBE_RXD_STAT_1588)
1116                pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1117#endif
1118        return pkt_flags;
1119}
1120
1121static inline uint64_t
1122rx_desc_error_to_pkt_flags(uint32_t rx_status)
1123{
1124        uint64_t pkt_flags = 0;
1125
1126        /* checksum offload can't be disabled */
1127        if (rx_status & TXGBE_RXD_STAT_IPCS) {
1128                pkt_flags |= (rx_status & TXGBE_RXD_ERR_IPCS
1129                                ? PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
1130        }
1131
1132        if (rx_status & TXGBE_RXD_STAT_L4CS) {
1133                pkt_flags |= (rx_status & TXGBE_RXD_ERR_L4CS
1134                                ? PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD);
1135        }
1136
1137        if (rx_status & TXGBE_RXD_STAT_EIPCS &&
1138            rx_status & TXGBE_RXD_ERR_EIPCS) {
1139                pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1140        }
1141
1142#ifdef RTE_LIB_SECURITY
1143        if (rx_status & TXGBE_RXD_STAT_SECP) {
1144                pkt_flags |= PKT_RX_SEC_OFFLOAD;
1145                if (rx_status & TXGBE_RXD_ERR_SECERR)
1146                        pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
1147        }
1148#endif
1149
1150        return pkt_flags;
1151}
1152
1153/*
1154 * LOOK_AHEAD defines how many desc statuses to check beyond the
1155 * current descriptor.
1156 * It must be a pound define for optimal performance.
1157 * Do not change the value of LOOK_AHEAD, as the txgbe_rx_scan_hw_ring
1158 * function only works with LOOK_AHEAD=8.
1159 */
1160#define LOOK_AHEAD 8
1161#if (LOOK_AHEAD != 8)
1162#error "PMD TXGBE: LOOK_AHEAD must be 8\n"
1163#endif
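/*
 * Bulk-allocation receive path: txgbe_rx_scan_hw_ring() scans up to
 * RTE_PMD_TXGBE_RX_MAX_BURST descriptors LOOK_AHEAD at a time and stages the
 * ready mbufs in rxq->rx_stage[]; txgbe_rx_fill_from_stage() hands them to
 * the application, and txgbe_rx_alloc_bufs() replenishes the ring in bursts
 * of rx_free_thresh buffers.
 */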
1164static inline int
1165txgbe_rx_scan_hw_ring(struct txgbe_rx_queue *rxq)
1166{
1167        volatile struct txgbe_rx_desc *rxdp;
1168        struct txgbe_rx_entry *rxep;
1169        struct rte_mbuf *mb;
1170        uint16_t pkt_len;
1171        uint64_t pkt_flags;
1172        int nb_dd;
1173        uint32_t s[LOOK_AHEAD];
1174        uint32_t pkt_info[LOOK_AHEAD];
1175        int i, j, nb_rx = 0;
1176        uint32_t status;
1177
1178        /* get references to current descriptor and S/W ring entry */
1179        rxdp = &rxq->rx_ring[rxq->rx_tail];
1180        rxep = &rxq->sw_ring[rxq->rx_tail];
1181
1182        status = rxdp->qw1.lo.status;
1183        /* check to make sure there is at least 1 packet to receive */
1184        if (!(status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
1185                return 0;
1186
1187        /*
1188         * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1189         * reference packets that are ready to be received.
1190         */
1191        for (i = 0; i < RTE_PMD_TXGBE_RX_MAX_BURST;
1192             i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1193                /* Read desc statuses backwards to avoid race condition */
1194                for (j = 0; j < LOOK_AHEAD; j++)
1195                        s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
1196
1197                rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
1198
1199                /* Compute how many status bits were set */
1200                for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1201                                (s[nb_dd] & TXGBE_RXD_STAT_DD); nb_dd++)
1202                        ;
1203
1204                for (j = 0; j < nb_dd; j++)
1205                        pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
1206
1207                nb_rx += nb_dd;
1208
1209                /* Translate descriptor info to mbuf format */
1210                for (j = 0; j < nb_dd; ++j) {
1211                        mb = rxep[j].mbuf;
1212                        pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
1213                                  rxq->crc_len;
1214                        mb->data_len = pkt_len;
1215                        mb->pkt_len = pkt_len;
1216                        mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
1217
1218                        /* convert descriptor fields to rte mbuf flags */
1219                        pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1220                                        rxq->vlan_flags);
1221                        pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1222                        pkt_flags |=
1223                                txgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1224                        mb->ol_flags = pkt_flags;
1225                        mb->packet_type =
1226                                txgbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
1227                                rxq->pkt_type_mask);
1228
1229                        if (likely(pkt_flags & PKT_RX_RSS_HASH))
1230                                mb->hash.rss =
1231                                        rte_le_to_cpu_32(rxdp[j].qw0.dw1);
1232                        else if (pkt_flags & PKT_RX_FDIR) {
1233                                mb->hash.fdir.hash =
1234                                        rte_le_to_cpu_16(rxdp[j].qw0.hi.csum) &
1235                                        TXGBE_ATR_HASH_MASK;
1236                                mb->hash.fdir.id =
1237                                        rte_le_to_cpu_16(rxdp[j].qw0.hi.ipid);
1238                        }
1239                }
1240
1241                /* Move mbuf pointers from the S/W ring to the stage */
1242                for (j = 0; j < LOOK_AHEAD; ++j)
1243                        rxq->rx_stage[i + j] = rxep[j].mbuf;
1244
1245                /* stop if all requested packets could not be received */
1246                if (nb_dd != LOOK_AHEAD)
1247                        break;
1248        }
1249
1250        /* clear software ring entries so we can cleanup correctly */
1251        for (i = 0; i < nb_rx; ++i)
1252                rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1253
1254        return nb_rx;
1255}
1256
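/*
 * Replenish rx_free_thresh descriptors in one shot: grab mbufs from the
 * mempool in bulk, program their DMA addresses into the ring, and advance
 * rx_free_trigger so the caller can defer the tail update until the whole
 * burst is ready.
 */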
1257static inline int
1258txgbe_rx_alloc_bufs(struct txgbe_rx_queue *rxq, bool reset_mbuf)
1259{
1260        volatile struct txgbe_rx_desc *rxdp;
1261        struct txgbe_rx_entry *rxep;
1262        struct rte_mbuf *mb;
1263        uint16_t alloc_idx;
1264        __le64 dma_addr;
1265        int diag, i;
1266
1267        /* allocate buffers in bulk directly into the S/W ring */
1268        alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1269        rxep = &rxq->sw_ring[alloc_idx];
1270        diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1271                                    rxq->rx_free_thresh);
1272        if (unlikely(diag != 0))
1273                return -ENOMEM;
1274
1275        rxdp = &rxq->rx_ring[alloc_idx];
1276        for (i = 0; i < rxq->rx_free_thresh; ++i) {
1277                /* populate the static rte mbuf fields */
1278                mb = rxep[i].mbuf;
1279                if (reset_mbuf)
1280                        mb->port = rxq->port_id;
1281
1282                rte_mbuf_refcnt_set(mb, 1);
1283                mb->data_off = RTE_PKTMBUF_HEADROOM;
1284
1285                /* populate the descriptors */
1286                dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1287                TXGBE_RXD_HDRADDR(&rxdp[i], 0);
1288                TXGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
1289        }
1290
1291        /* update state of internal queue structure */
1292        rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1293        if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1294                rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1295
1296        /* no errors */
1297        return 0;
1298}
1299
1300static inline uint16_t
1301txgbe_rx_fill_from_stage(struct txgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1302                         uint16_t nb_pkts)
1303{
1304        struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1305        int i;
1306
1307        /* how many packets are ready to return? */
1308        nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1309
1310        /* copy mbuf pointers to the application's packet list */
1311        for (i = 0; i < nb_pkts; ++i)
1312                rx_pkts[i] = stage[i];
1313
1314        /* update internal queue state */
1315        rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1316        rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1317
1318        return nb_pkts;
1319}
1320
1321static inline uint16_t
1322txgbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1323             uint16_t nb_pkts)
1324{
1325        struct txgbe_rx_queue *rxq = (struct txgbe_rx_queue *)rx_queue;
1326        struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1327        uint16_t nb_rx = 0;
1328
1329        /* Any previously recv'd pkts will be returned from the Rx stage */
1330        if (rxq->rx_nb_avail)
1331                return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1332
1333        /* Scan the H/W ring for packets to receive */
1334        nb_rx = (uint16_t)txgbe_rx_scan_hw_ring(rxq);
1335
1336        /* update internal queue state */
1337        rxq->rx_next_avail = 0;
1338        rxq->rx_nb_avail = nb_rx;
1339        rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1340
1341        /* if required, allocate new buffers to replenish descriptors */
1342        if (rxq->rx_tail > rxq->rx_free_trigger) {
1343                uint16_t cur_free_trigger = rxq->rx_free_trigger;
1344
1345                if (txgbe_rx_alloc_bufs(rxq, true) != 0) {
1346                        int i, j;
1347
1348                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1349                                   "queue_id=%u", (uint16_t)rxq->port_id,
1350                                   (uint16_t)rxq->queue_id);
1351
1352                        dev->data->rx_mbuf_alloc_failed +=
1353                                rxq->rx_free_thresh;
1354
1355                        /*
1356                         * Need to rewind any previous receives if we cannot
1357                         * allocate new buffers to replenish the old ones.
1358                         */
1359                        rxq->rx_nb_avail = 0;
1360                        rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1361                        for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1362                                rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1363
1364                        return 0;
1365                }
1366
1367                /* update tail pointer */
1368                rte_wmb();
1369                txgbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
1370        }
1371
1372        if (rxq->rx_tail >= rxq->nb_rx_desc)
1373                rxq->rx_tail = 0;
1374
1375        /* received any packets this loop? */
1376        if (rxq->rx_nb_avail)
1377                return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1378
1379        return 0;
1380}
1381
1382/* split requests into chunks of size RTE_PMD_TXGBE_RX_MAX_BURST */
1383uint16_t
1384txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1385                           uint16_t nb_pkts)
1386{
1387        uint16_t nb_rx;
1388
1389        if (unlikely(nb_pkts == 0))
1390                return 0;
1391
1392        if (likely(nb_pkts <= RTE_PMD_TXGBE_RX_MAX_BURST))
1393                return txgbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1394
1395        /* request is relatively large, chunk it up */
1396        nb_rx = 0;
1397        while (nb_pkts) {
1398                uint16_t ret, n;
1399
1400                n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_RX_MAX_BURST);
1401                ret = txgbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1402                nb_rx = (uint16_t)(nb_rx + ret);
1403                nb_pkts = (uint16_t)(nb_pkts - ret);
1404                if (ret < n)
1405                        break;
1406        }
1407
1408        return nb_rx;
1409}
1410
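    /*
     * Usage sketch (illustrative only, not part of the driver): an
     * application normally reaches txgbe_recv_pkts_bulk_alloc() above through
     * rte_eth_rx_burst(); a request larger than RTE_PMD_TXGBE_RX_MAX_BURST is
     * simply split into smaller bursts.  port_id and queue_id below are
     * assumed to refer to a started txgbe port/queue.
     *
     *     struct rte_mbuf *pkts[256];
     *     uint16_t k, n = rte_eth_rx_burst(port_id, queue_id, pkts, 256);
     *
     *     for (k = 0; k < n; k++)
     *             rte_pktmbuf_free(pkts[k]);
     */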
1411uint16_t
1412txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1413                uint16_t nb_pkts)
1414{
1415        struct txgbe_rx_queue *rxq;
1416        volatile struct txgbe_rx_desc *rx_ring;
1417        volatile struct txgbe_rx_desc *rxdp;
1418        struct txgbe_rx_entry *sw_ring;
1419        struct txgbe_rx_entry *rxe;
1420        struct rte_mbuf *rxm;
1421        struct rte_mbuf *nmb;
1422        struct txgbe_rx_desc rxd;
1423        uint64_t dma_addr;
1424        uint32_t staterr;
1425        uint32_t pkt_info;
1426        uint16_t pkt_len;
1427        uint16_t rx_id;
1428        uint16_t nb_rx;
1429        uint16_t nb_hold;
1430        uint64_t pkt_flags;
1431
1432        nb_rx = 0;
1433        nb_hold = 0;
1434        rxq = rx_queue;
1435        rx_id = rxq->rx_tail;
1436        rx_ring = rxq->rx_ring;
1437        sw_ring = rxq->sw_ring;
1438        struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1439        while (nb_rx < nb_pkts) {
1440                /*
1441                 * The order of operations here is important as the DD status
1442                 * bit must not be read after any other descriptor fields.
1443                 * rx_ring and rxdp are pointing to volatile data so the order
1444                 * of accesses cannot be reordered by the compiler. If they were
1445                 * not volatile, they could be reordered which could lead to
1446                 * using invalid descriptor fields when read from rxd.
1447                 */
1448                rxdp = &rx_ring[rx_id];
1449                staterr = rxdp->qw1.lo.status;
1450                if (!(staterr & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
1451                        break;
1452                rxd = *rxdp;
1453
1454                /*
1455                 * End of packet.
1456                 *
1457                 * If the TXGBE_RXD_STAT_EOP flag is not set, the RX packet
1458                 * is likely to be invalid and to be dropped by the various
1459                 * validation checks performed by the network stack.
1460                 *
1461                 * Allocate a new mbuf to replenish the RX ring descriptor.
1462                 * If the allocation fails:
1463                 *    - arrange for that RX descriptor to be the first one
1464                 *      being parsed the next time the receive function is
1465                 *      invoked [on the same queue].
1466                 *
1467                 *    - Stop parsing the RX ring and return immediately.
1468                 *
1469                 * This policy does not drop the packet received in the RX
1470                 * descriptor for which the allocation of a new mbuf failed.
1471                 * Thus, it allows that packet to be retrieved later once
1472                 * mbufs have been freed in the meantime.
1473                 * As a side effect, holding RX descriptors instead of
1474                 * systematically giving them back to the NIC may lead to
1475                 * RX ring exhaustion situations.
1476                 * However, the NIC can gracefully prevent such situations
1477                 * from happening by sending specific "back-pressure" flow control
1478                 * frames to its peer(s).
1479                 */
1480                PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1481                           "ext_err_stat=0x%08x pkt_len=%u",
1482                           (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1483                           (uint16_t)rx_id, (uint32_t)staterr,
1484                           (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
1485
1486                nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1487                if (nmb == NULL) {
1488                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1489                                   "queue_id=%u", (uint16_t)rxq->port_id,
1490                                   (uint16_t)rxq->queue_id);
1491                        dev->data->rx_mbuf_alloc_failed++;
1492                        break;
1493                }
1494
1495                nb_hold++;
1496                rxe = &sw_ring[rx_id];
1497                rx_id++;
1498                if (rx_id == rxq->nb_rx_desc)
1499                        rx_id = 0;
1500
1501                /* Prefetch next mbuf while processing current one. */
1502                rte_txgbe_prefetch(sw_ring[rx_id].mbuf);
1503
1504                /*
1505                 * When next RX descriptor is on a cache-line boundary,
1506                 * prefetch the next 4 RX descriptors and the next 8 pointers
1507                 * to mbufs.
1508                 */
1509                if ((rx_id & 0x3) == 0) {
1510                        rte_txgbe_prefetch(&rx_ring[rx_id]);
1511                        rte_txgbe_prefetch(&sw_ring[rx_id]);
1512                }
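                    /*
                     * Example of the boundary check above (illustrative,
                     * assuming 16-byte Rx descriptors and 64-byte cache
                     * lines): whenever (rx_id & 0x3) == 0, descriptors
                     * rx_id .. rx_id+3 share one cache line, so a single
                     * prefetch covers the next four descriptors; one line of
                     * sw_ring likewise holds eight 8-byte mbuf pointers.
                     */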
1513
1514                rxm = rxe->mbuf;
1515                rxe->mbuf = nmb;
1516                dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1517                TXGBE_RXD_HDRADDR(rxdp, 0);
1518                TXGBE_RXD_PKTADDR(rxdp, dma_addr);
1519
1520                /*
1521                 * Initialize the returned mbuf.
1522                 * 1) setup generic mbuf fields:
1523                 *    - number of segments,
1524                 *    - next segment,
1525                 *    - packet length,
1526                 *    - RX port identifier.
1527                 * 2) integrate hardware offload data, if any:
1528                 *    - RSS flag & hash,
1529                 *    - IP checksum flag,
1530                 *    - VLAN TCI, if any,
1531                 *    - error flags.
1532                 */
1533                pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
1534                                      rxq->crc_len);
1535                rxm->data_off = RTE_PKTMBUF_HEADROOM;
1536                rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1537                rxm->nb_segs = 1;
1538                rxm->next = NULL;
1539                rxm->pkt_len = pkt_len;
1540                rxm->data_len = pkt_len;
1541                rxm->port = rxq->port_id;
1542
1543                pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
1544                /* Only valid if PKT_RX_VLAN set in pkt_flags */
1545                rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
1546
1547                pkt_flags = rx_desc_status_to_pkt_flags(staterr,
1548                                        rxq->vlan_flags);
1549                pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1550                pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1551                rxm->ol_flags = pkt_flags;
1552                rxm->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1553                                                       rxq->pkt_type_mask);
1554
1555                if (likely(pkt_flags & PKT_RX_RSS_HASH)) {
1556                        rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
1557                } else if (pkt_flags & PKT_RX_FDIR) {
1558                        rxm->hash.fdir.hash =
1559                                rte_le_to_cpu_16(rxd.qw0.hi.csum) &
1560                                TXGBE_ATR_HASH_MASK;
1561                        rxm->hash.fdir.id = rte_le_to_cpu_16(rxd.qw0.hi.ipid);
1562                }
1563                /*
1564                 * Store the mbuf address into the next entry of the array
1565                 * of returned packets.
1566                 */
1567                rx_pkts[nb_rx++] = rxm;
1568        }
1569        rxq->rx_tail = rx_id;
1570
1571        /*
1572         * If the number of free RX descriptors is greater than the RX free
1573         * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1574         * register.
1575         * Update the RDT with the value of the last processed RX descriptor
1576         * minus 1, to guarantee that the RDT register is never equal to the
1577         * RDH register, which creates a "full" ring situation from the
1578         * hardware point of view...
1579         */
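            /*
             * Worked example (illustrative): with rx_free_thresh = 32, RDT is
             * only written once more than 32 descriptors have been consumed
             * since the last write, and the value written is one position
             * behind the next descriptor to be scanned, so RDT never equals
             * RDH.
             */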
1580        nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1581        if (nb_hold > rxq->rx_free_thresh) {
1582                PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1583                           "nb_hold=%u nb_rx=%u",
1584                           (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1585                           (uint16_t)rx_id, (uint16_t)nb_hold,
1586                           (uint16_t)nb_rx);
1587                rx_id = (uint16_t)((rx_id == 0) ?
1588                                (rxq->nb_rx_desc - 1) : (rx_id - 1));
1589                txgbe_set32(rxq->rdt_reg_addr, rx_id);
1590                nb_hold = 0;
1591        }
1592        rxq->nb_rx_hold = nb_hold;
1593        return nb_rx;
1594}
1595
1596/**
1597 * txgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1598 *
1599 * Fill the following info in the HEAD buffer of the Rx cluster:
1600 *    - RX port identifier
1601 *    - hardware offload data, if any:
1602 *      - RSS flag & hash
1603 *      - IP checksum flag
1604 *      - VLAN TCI, if any
1605 *      - error flags
1606 * @head HEAD of the packet cluster
1607 * @desc HW descriptor to get data from
1608 * @rxq Pointer to the Rx queue
     * @staterr Status/error word read from the descriptor
1609 */
1610static inline void
1611txgbe_fill_cluster_head_buf(struct rte_mbuf *head, struct txgbe_rx_desc *desc,
1612                struct txgbe_rx_queue *rxq, uint32_t staterr)
1613{
1614        uint32_t pkt_info;
1615        uint64_t pkt_flags;
1616
1617        head->port = rxq->port_id;
1618
1619        /* The vlan_tci field is only valid when PKT_RX_VLAN is
1620         * set in the pkt_flags field.
1621         */
1622        head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
1623        pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
1624        pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1625        pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1626        pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1627        head->ol_flags = pkt_flags;
1628        head->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1629                                                rxq->pkt_type_mask);
1630
1631        if (likely(pkt_flags & PKT_RX_RSS_HASH)) {
1632                head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
1633        } else if (pkt_flags & PKT_RX_FDIR) {
1634                head->hash.fdir.hash = rte_le_to_cpu_16(desc->qw0.hi.csum)
1635                                & TXGBE_ATR_HASH_MASK;
1636                head->hash.fdir.id = rte_le_to_cpu_16(desc->qw0.hi.ipid);
1637        }
1638}
1639
1640/**
1641 * txgbe_recv_pkts_lro - receive handler for the LRO case.
1642 *
1643 * @rx_queue Rx queue handle
1644 * @rx_pkts table of received packets
1645 * @nb_pkts size of rx_pkts table
1646 * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1647 *
1648 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1649 * additional ring of txgbe_rsc_entry's that will hold the relevant RSC info.
1650 *
1651 * We use the same logic as in Linux and in FreeBSD txgbe drivers:
1652 * 1) When non-EOP RSC completion arrives:
1653 *    a) Update the HEAD of the current RSC aggregation cluster with the new
1654 *       segment's data length.
1655 *    b) Set the "next" pointer of the current segment to point to the segment
1656 *       at the NEXTP index.
1657 *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1658 *       in the sw_rsc_ring.
1659 * 2) When EOP arrives we just update the cluster's total length and offload
1660 *    flags and deliver the cluster up to the upper layers. In our case - put it
1661 *    in the rx_pkts table.
1662 *
1663 * Returns the number of received packets/clusters (according to the "bulk
1664 * receive" interface).
1665 */
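    /*
     * Illustration of the scheme described above (informal, indices are made
     * up): an RSC cluster spanning descriptors 5 -> 9 -> 12 is stitched
     * together as the completions arrive:
     *
     *     sw_sc_ring[9].fbuf  = head   (set while processing descriptor 5)
     *     sw_sc_ring[12].fbuf = head   (set while processing descriptor 9)
     *
     * head->nb_segs grows and head->pkt_len accumulates each data_len, and
     * only the EOP completion (descriptor 12) delivers "head" to rx_pkts[].
     */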
1666static inline uint16_t
1667txgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1668                    bool bulk_alloc)
1669{
1670        struct txgbe_rx_queue *rxq = rx_queue;
1671        struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1672        volatile struct txgbe_rx_desc *rx_ring = rxq->rx_ring;
1673        struct txgbe_rx_entry *sw_ring = rxq->sw_ring;
1674        struct txgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1675        uint16_t rx_id = rxq->rx_tail;
1676        uint16_t nb_rx = 0;
1677        uint16_t nb_hold = rxq->nb_rx_hold;
1678        uint16_t prev_id = rxq->rx_tail;
1679
1680        while (nb_rx < nb_pkts) {
1681                bool eop;
1682                struct txgbe_rx_entry *rxe;
1683                struct txgbe_scattered_rx_entry *sc_entry;
1684                struct txgbe_scattered_rx_entry *next_sc_entry = NULL;
1685                struct txgbe_rx_entry *next_rxe = NULL;
1686                struct rte_mbuf *first_seg;
1687                struct rte_mbuf *rxm;
1688                struct rte_mbuf *nmb = NULL;
1689                struct txgbe_rx_desc rxd;
1690                uint16_t data_len;
1691                uint16_t next_id;
1692                volatile struct txgbe_rx_desc *rxdp;
1693                uint32_t staterr;
1694
1695next_desc:
1696                /*
1697                 * The code in this whole file uses the volatile pointer to
1698                 * ensure the read ordering of the status and the rest of the
1699                 * descriptor fields (on the compiler level only!!!). This is so
1700                 * UGLY - why not just use the compiler barrier instead? DPDK
1701                 * even has the rte_compiler_barrier() for that.
1702                 *
1703                 * But most importantly this is just wrong because this doesn't
1704                 * ensure memory ordering in a general case at all. For
1705                 * instance, DPDK is supposed to work on Power CPUs where a
1706                 * compiler barrier may just not be enough!
1707                 *
1708                 * I tried to write only this function properly to have a
1709                 * starting point (as a part of an LRO/RSC series) but the
1710                 * compiler cursed at me when I tried to cast away the
1711                 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
1712                 * keeping it the way it is for now.
1713                 *
1714                 * The code in this file is broken in so many other places and
1715                 * will just not work on a big endian CPU anyway therefore the
1716                 * lines below will have to be revisited together with the rest
1717                 * of the txgbe PMD.
1718                 *
1719                 * TODO:
1720                 *    - Get rid of "volatile" and let the compiler do its job.
1721                 *    - Use the proper memory barrier (rte_rmb()) to ensure the
1722                 *      memory ordering below.
1723                 */
1724                rxdp = &rx_ring[rx_id];
1725                staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
1726
1727                if (!(staterr & TXGBE_RXD_STAT_DD))
1728                        break;
1729
1730                rxd = *rxdp;
1731
1732                PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1733                                  "staterr=0x%x data_len=%u",
1734                           rxq->port_id, rxq->queue_id, rx_id, staterr,
1735                           rte_le_to_cpu_16(rxd.qw1.hi.len));
1736
1737                if (!bulk_alloc) {
1738                        nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1739                        if (nmb == NULL) {
1740                                PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
1741                                                  "port_id=%u queue_id=%u",
1742                                           rxq->port_id, rxq->queue_id);
1743
1744                                dev->data->rx_mbuf_alloc_failed++;
1745                                break;
1746                        }
1747                } else if (nb_hold > rxq->rx_free_thresh) {
1748                        uint16_t next_rdt = rxq->rx_free_trigger;
1749
1750                        if (!txgbe_rx_alloc_bufs(rxq, false)) {
1751                                rte_wmb();
1752                                txgbe_set32_relaxed(rxq->rdt_reg_addr,
1753                                                            next_rdt);
1754                                nb_hold -= rxq->rx_free_thresh;
1755                        } else {
1756                                PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
1757                                                  "port_id=%u queue_id=%u",
1758                                           rxq->port_id, rxq->queue_id);
1759
1760                                dev->data->rx_mbuf_alloc_failed++;
1761                                break;
1762                        }
1763                }
1764
1765                nb_hold++;
1766                rxe = &sw_ring[rx_id];
1767                eop = staterr & TXGBE_RXD_STAT_EOP;
1768
1769                next_id = rx_id + 1;
1770                if (next_id == rxq->nb_rx_desc)
1771                        next_id = 0;
1772
1773                /* Prefetch next mbuf while processing current one. */
1774                rte_txgbe_prefetch(sw_ring[next_id].mbuf);
1775
1776                /*
1777                 * When next RX descriptor is on a cache-line boundary,
1778                 * prefetch the next 4 RX descriptors and the next 8 pointers
1779                 * to mbufs.
1780                 */
1781                if ((next_id & 0x3) == 0) {
1782                        rte_txgbe_prefetch(&rx_ring[next_id]);
1783                        rte_txgbe_prefetch(&sw_ring[next_id]);
1784                }
1785
1786                rxm = rxe->mbuf;
1787
1788                if (!bulk_alloc) {
1789                        __le64 dma =
1790                          rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1791                        /*
1792                         * Update RX descriptor with the physical address of the
1793                         * new data buffer of the newly allocated mbuf.
1794                         */
1795                        rxe->mbuf = nmb;
1796
1797                        rxm->data_off = RTE_PKTMBUF_HEADROOM;
1798                        TXGBE_RXD_HDRADDR(rxdp, 0);
1799                        TXGBE_RXD_PKTADDR(rxdp, dma);
1800                } else {
1801                        rxe->mbuf = NULL;
1802                }
1803
1804                /*
1805                 * Set data length & data buffer address of mbuf.
1806                 */
1807                data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
1808                rxm->data_len = data_len;
1809
1810                if (!eop) {
1811                        uint16_t nextp_id;
1812                        /*
1813                         * Get next descriptor index:
1814                         *  - For RSC it's in the NEXTP field.
1815                         *  - For a scattered packet - it's just a following
1816                         *    descriptor.
1817                         */
1818                        if (TXGBE_RXD_RSCCNT(rxd.qw0.dw0))
1819                                nextp_id = TXGBE_RXD_NEXTP(staterr);
1820                        else
1821                                nextp_id = next_id;
1822
1823                        next_sc_entry = &sw_sc_ring[nextp_id];
1824                        next_rxe = &sw_ring[nextp_id];
1825                        rte_txgbe_prefetch(next_rxe);
1826                }
1827
1828                sc_entry = &sw_sc_ring[rx_id];
1829                first_seg = sc_entry->fbuf;
1830                sc_entry->fbuf = NULL;
1831
1832                /*
1833                 * If this is the first buffer of the received packet,
1834                 * set the pointer to the first mbuf of the packet and
1835                 * initialize its context.
1836                 * Otherwise, update the total length and the number of segments
1837                 * of the current scattered packet, and update the pointer to
1838                 * the last mbuf of the current packet.
1839                 */
1840                if (first_seg == NULL) {
1841                        first_seg = rxm;
1842                        first_seg->pkt_len = data_len;
1843                        first_seg->nb_segs = 1;
1844                } else {
1845                        first_seg->pkt_len += data_len;
1846                        first_seg->nb_segs++;
1847                }
1848
1849                prev_id = rx_id;
1850                rx_id = next_id;
1851
1852                /*
1853                 * If this is not the last buffer of the received packet, update
1854                 * the pointer to the first mbuf at the NEXTP entry in the
1855                 * sw_sc_ring and continue to parse the RX ring.
1856                 */
1857                if (!eop && next_rxe) {
1858                        rxm->next = next_rxe->mbuf;
1859                        next_sc_entry->fbuf = first_seg;
1860                        goto next_desc;
1861                }
1862
1863                /* Initialize the first mbuf of the returned packet */
1864                txgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
1865
1866                /*
1867                 * Deal with the case when HW CRC strip is disabled.
1868                 * That can't happen when LRO is enabled, but could still
1869                 * happen for scattered RX mode.
1870                 */
1871                first_seg->pkt_len -= rxq->crc_len;
1872                if (unlikely(rxm->data_len <= rxq->crc_len)) {
1873                        struct rte_mbuf *lp;
1874
1875                        for (lp = first_seg; lp->next != rxm; lp = lp->next)
1876                                ;
1877
1878                        first_seg->nb_segs--;
1879                        lp->data_len -= rxq->crc_len - rxm->data_len;
1880                        lp->next = NULL;
1881                        rte_pktmbuf_free_seg(rxm);
1882                } else {
1883                        rxm->data_len -= rxq->crc_len;
1884                }
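                    /*
                     * Example (illustrative): with a 4-byte CRC, if the last
                     * segment carries only 2 bytes it holds nothing but CRC;
                     * it is freed above and the previous segment gives up the
                     * remaining 2 CRC bytes instead.
                     */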
1885
1886                /* Prefetch data of first segment, if configured to do so. */
1887                rte_packet_prefetch((char *)first_seg->buf_addr +
1888                        first_seg->data_off);
1889
1890                /*
1891                 * Store the mbuf address into the next entry of the array
1892                 * of returned packets.
1893                 */
1894                rx_pkts[nb_rx++] = first_seg;
1895        }
1896
1897        /*
1898         * Record index of the next RX descriptor to probe.
1899         */
1900        rxq->rx_tail = rx_id;
1901
1902        /*
1903         * If the number of free RX descriptors is greater than the RX free
1904         * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1905         * register.
1906         * Update the RDT with the value of the last processed RX descriptor
1907         * minus 1, to guarantee that the RDT register is never equal to the
1908         * RDH register, which creates a "full" ring situation from the
1909         * hardware point of view...
1910         */
1911        if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1912                PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1913                           "nb_hold=%u nb_rx=%u",
1914                           rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1915
1916                rte_wmb();
1917                txgbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
1918                nb_hold = 0;
1919        }
1920
1921        rxq->nb_rx_hold = nb_hold;
1922        return nb_rx;
1923}
1924
1925uint16_t
1926txgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1927                                 uint16_t nb_pkts)
1928{
1929        return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
1930}
1931
1932uint16_t
1933txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1934                               uint16_t nb_pkts)
1935{
1936        return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
1937}
1938
1939uint64_t
1940txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
1941{
1942        return DEV_RX_OFFLOAD_VLAN_STRIP;
1943}
1944
1945uint64_t
1946txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
1947{
1948        uint64_t offloads;
1949        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1950        struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
1951
1952        offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
1953                   DEV_RX_OFFLOAD_UDP_CKSUM   |
1954                   DEV_RX_OFFLOAD_TCP_CKSUM   |
1955                   DEV_RX_OFFLOAD_KEEP_CRC    |
1956                   DEV_RX_OFFLOAD_JUMBO_FRAME |
1957                   DEV_RX_OFFLOAD_VLAN_FILTER |
1958                   DEV_RX_OFFLOAD_RSS_HASH |
1959                   DEV_RX_OFFLOAD_SCATTER;
1960
1961        if (!txgbe_is_vf(dev))
1962                offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
1963                             DEV_RX_OFFLOAD_QINQ_STRIP |
1964                             DEV_RX_OFFLOAD_VLAN_EXTEND);
1965
1966        /*
1967         * RSC is only supported by PF devices in a non-SR-IOV
1968         * mode.
1969         */
1970        if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
1971                offloads |= DEV_RX_OFFLOAD_TCP_LRO;
1972
1973        if (hw->mac.type == txgbe_mac_raptor)
1974                offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
1975
1976        offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
1977
1978#ifdef RTE_LIB_SECURITY
1979        if (dev->security_ctx)
1980                offloads |= DEV_RX_OFFLOAD_SECURITY;
1981#endif
1982
1983        return offloads;
1984}
1985
1986static void __rte_cold
1987txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
1988{
1989        unsigned int i;
1990
1991        if (txq->sw_ring != NULL) {
1992                for (i = 0; i < txq->nb_tx_desc; i++) {
1993                        if (txq->sw_ring[i].mbuf != NULL) {
1994                                rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1995                                txq->sw_ring[i].mbuf = NULL;
1996                        }
1997                }
1998        }
1999}
2000
2001static int
2002txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt)
2003{
2004        struct txgbe_tx_entry *swr_ring = txq->sw_ring;
2005        uint16_t i, tx_last, tx_id;
2006        uint16_t nb_tx_free_last;
2007        uint16_t nb_tx_to_clean;
2008        uint32_t pkt_cnt;
2009
2010        /* Start freeing mbufs from the descriptor following tx_tail */
2011        tx_last = txq->tx_tail;
2012        tx_id  = swr_ring[tx_last].next_id;
2013
2014        if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq))
2015                return 0;
2016
2017        nb_tx_to_clean = txq->nb_tx_free;
2018        nb_tx_free_last = txq->nb_tx_free;
2019        if (!free_cnt)
2020                free_cnt = txq->nb_tx_desc;
2021
2022        /* Loop through swr_ring to count the number of
2023         * freeable mbufs and packets.
2024         */
2025        for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2026                for (i = 0; i < nb_tx_to_clean &&
2027                        pkt_cnt < free_cnt &&
2028                        tx_id != tx_last; i++) {
2029                        if (swr_ring[tx_id].mbuf != NULL) {
2030                                rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2031                                swr_ring[tx_id].mbuf = NULL;
2032
2033                                /*
2034                                 * last segment in the packet,
2035                                 * increment packet count
2036                                 */
2037                                pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2038                        }
2039
2040                        tx_id = swr_ring[tx_id].next_id;
2041                }
2042
2043                if (pkt_cnt < free_cnt) {
2044                        if (txgbe_xmit_cleanup(txq))
2045                                break;
2046
2047                        nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2048                        nb_tx_free_last = txq->nb_tx_free;
2049                }
2050        }
2051
2052        return (int)pkt_cnt;
2053}
2054
2055static int
2056txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq,
2057                        uint32_t free_cnt)
2058{
2059        int i, n, cnt;
2060
2061        if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2062                free_cnt = txq->nb_tx_desc;
2063
2064        cnt = free_cnt - free_cnt % txq->tx_free_thresh;
2065
2066        for (i = 0; i < cnt; i += n) {
2067                if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
2068                        break;
2069
2070                n = txgbe_tx_free_bufs(txq);
2071
2072                if (n == 0)
2073                        break;
2074        }
2075
2076        return i;
2077}
2078
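    /*
     * Note (illustrative): txgbe_dev_tx_done_cleanup() below picks the
     * cleanup variant that matches the transmit path in use.  For the simple
     * path the request is first rounded down to a multiple of tx_free_thresh,
     * e.g. with free_cnt = 100 and tx_free_thresh = 32 at most 96 descriptors
     * are freed.
     */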
2079int
2080txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
2081{
2082        struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
2083        if (txq->offloads == 0 &&
2084#ifdef RTE_LIB_SECURITY
2085                !(txq->using_ipsec) &&
2086#endif
2087                txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
2088                return txgbe_tx_done_cleanup_simple(txq, free_cnt);
2089
2090        return txgbe_tx_done_cleanup_full(txq, free_cnt);
2091}
2092
2093static void __rte_cold
2094txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
2095{
2096        if (txq != NULL &&
2097            txq->sw_ring != NULL)
2098                rte_free(txq->sw_ring);
2099}
2100
2101static void __rte_cold
2102txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
2103{
2104        if (txq != NULL && txq->ops != NULL) {
2105                txq->ops->release_mbufs(txq);
2106                txq->ops->free_swring(txq);
2107                rte_free(txq);
2108        }
2109}
2110
2111void __rte_cold
2112txgbe_dev_tx_queue_release(void *txq)
2113{
2114        txgbe_tx_queue_release(txq);
2115}
2116
2117/* (Re)set dynamic txgbe_tx_queue fields to defaults */
2118static void __rte_cold
2119txgbe_reset_tx_queue(struct txgbe_tx_queue *txq)
2120{
2121        static const struct txgbe_tx_desc zeroed_desc = {0};
2122        struct txgbe_tx_entry *txe = txq->sw_ring;
2123        uint16_t prev, i;
2124
2125        /* Zero out HW ring memory */
2126        for (i = 0; i < txq->nb_tx_desc; i++)
2127                txq->tx_ring[i] = zeroed_desc;
2128
2129        /* Initialize SW ring entries */
2130        prev = (uint16_t)(txq->nb_tx_desc - 1);
2131        for (i = 0; i < txq->nb_tx_desc; i++) {
2132                volatile struct txgbe_tx_desc *txd = &txq->tx_ring[i];
2133
2134                txd->dw3 = rte_cpu_to_le_32(TXGBE_TXD_DD);
2135                txe[i].mbuf = NULL;
2136                txe[i].last_id = i;
2137                txe[prev].next_id = i;
2138                prev = i;
2139        }
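            /*
             * The loop above links the entries into a ring, e.g. with 8
             * descriptors next_id runs 0 -> 1 -> ... -> 7 -> 0, and every
             * last_id initially points at the entry itself.
             */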
2140
2141        txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
2142        txq->tx_tail = 0;
2143
2144        /*
2145         * Always allow 1 descriptor to be un-allocated to avoid
2146         * a H/W race condition
2147         */
2148        txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2149        txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2150        txq->ctx_curr = 0;
2151        memset((void *)&txq->ctx_cache, 0,
2152                TXGBE_CTX_NUM * sizeof(struct txgbe_ctx_info));
2153}
2154
2155static const struct txgbe_txq_ops def_txq_ops = {
2156        .release_mbufs = txgbe_tx_queue_release_mbufs,
2157        .free_swring = txgbe_tx_free_swring,
2158        .reset = txgbe_reset_tx_queue,
2159};
2160
2161/* Takes an ethdev and a queue and sets up the tx function to be used based on
2162 * the queue parameters. Used in tx_queue_setup by primary process and then
2163 * in dev_init by secondary process when attaching to an existing ethdev.
2164 */
2165void __rte_cold
2166txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
2167{
2168        /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2169        if (txq->offloads == 0 &&
2170#ifdef RTE_LIB_SECURITY
2171                        !(txq->using_ipsec) &&
2172#endif
2173                        txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
2174                PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2175                dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
2176                dev->tx_pkt_prepare = NULL;
2177        } else {
2178                PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2179                PMD_INIT_LOG(DEBUG,
2180                                " - offloads = 0x%" PRIx64,
2181                                txq->offloads);
2182                PMD_INIT_LOG(DEBUG,
2183                                " - tx_free_thresh = %lu [RTE_PMD_TXGBE_TX_MAX_BURST=%lu]",
2184                                (unsigned long)txq->tx_free_thresh,
2185                                (unsigned long)RTE_PMD_TXGBE_TX_MAX_BURST);
2186                dev->tx_pkt_burst = txgbe_xmit_pkts;
2187                dev->tx_pkt_prepare = txgbe_prep_pkts;
2188        }
2189}
2190
2191uint64_t
2192txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2193{
2194        RTE_SET_USED(dev);
2195
2196        return 0;
2197}
2198
2199uint64_t
2200txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2201{
2202        uint64_t tx_offload_capa;
2203
2204        tx_offload_capa =
2205                DEV_TX_OFFLOAD_VLAN_INSERT |
2206                DEV_TX_OFFLOAD_IPV4_CKSUM  |
2207                DEV_TX_OFFLOAD_UDP_CKSUM   |
2208                DEV_TX_OFFLOAD_TCP_CKSUM   |
2209                DEV_TX_OFFLOAD_SCTP_CKSUM  |
2210                DEV_TX_OFFLOAD_TCP_TSO     |
2211                DEV_TX_OFFLOAD_UDP_TSO     |
2212                DEV_TX_OFFLOAD_UDP_TNL_TSO      |
2213                DEV_TX_OFFLOAD_IP_TNL_TSO       |
2214                DEV_TX_OFFLOAD_VXLAN_TNL_TSO    |
2215                DEV_TX_OFFLOAD_GRE_TNL_TSO      |
2216                DEV_TX_OFFLOAD_IPIP_TNL_TSO     |
2217                DEV_TX_OFFLOAD_GENEVE_TNL_TSO   |
2218                DEV_TX_OFFLOAD_MULTI_SEGS;
2219
2220        if (!txgbe_is_vf(dev))
2221                tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
2222
2223        tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2224
2225        tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2226                           DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
2227
2228#ifdef RTE_LIB_SECURITY
2229        if (dev->security_ctx)
2230                tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2231#endif
2232        return tx_offload_capa;
2233}
2234
2235int __rte_cold
2236txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2237                         uint16_t queue_idx,
2238                         uint16_t nb_desc,
2239                         unsigned int socket_id,
2240                         const struct rte_eth_txconf *tx_conf)
2241{
2242        const struct rte_memzone *tz;
2243        struct txgbe_tx_queue *txq;
2244        struct txgbe_hw     *hw;
2245        uint16_t tx_free_thresh;
2246        uint64_t offloads;
2247
2248        PMD_INIT_FUNC_TRACE();
2249        hw = TXGBE_DEV_HW(dev);
2250
2251        offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2252
2253        /*
2254         * Validate number of transmit descriptors.
2255         * It must not exceed the hardware maximum and must be a multiple
2256         * of TXGBE_TXD_ALIGN.
2257         */
2258        if (nb_desc % TXGBE_TXD_ALIGN != 0 ||
2259            nb_desc > TXGBE_RING_DESC_MAX ||
2260            nb_desc < TXGBE_RING_DESC_MIN) {
2261                return -EINVAL;
2262        }
2263
2264        /*
2265         * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2266         * descriptors are used or if the number of descriptors required
2267         * to transmit a packet is greater than the number of free TX
2268         * descriptors.
2269         * One descriptor in the TX ring is used as a sentinel to avoid a
2270         * H/W race condition, hence the maximum threshold constraints.
2271         * When set to zero use default values.
2272         */
2273        tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2274                        tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2275        if (tx_free_thresh >= (nb_desc - 3)) {
2276                PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
2277                             "TX descriptors minus 3. (tx_free_thresh=%u "
2278                             "port=%d queue=%d)",
2279                             (unsigned int)tx_free_thresh,
2280                             (int)dev->data->port_id, (int)queue_idx);
2281                return -(EINVAL);
2282        }
2283
2284        if ((nb_desc % tx_free_thresh) != 0) {
2285                PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
2286                             "number of TX descriptors. (tx_free_thresh=%u "
2287                             "port=%d queue=%d)", (unsigned int)tx_free_thresh,
2288                             (int)dev->data->port_id, (int)queue_idx);
2289                return -(EINVAL);
2290        }
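            /*
             * Example (illustrative): with nb_desc = 512, a tx_free_thresh of
             * 32 passes both checks above, since 32 < 512 - 3 and
             * 512 % 32 == 0.
             */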
2291
2292        /* Free memory prior to re-allocation if needed... */
2293        if (dev->data->tx_queues[queue_idx] != NULL) {
2294                txgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2295                dev->data->tx_queues[queue_idx] = NULL;
2296        }
2297
2298        /* First allocate the tx queue data structure */
2299        txq = rte_zmalloc_socket("ethdev TX queue",
2300                                 sizeof(struct txgbe_tx_queue),
2301                                 RTE_CACHE_LINE_SIZE, socket_id);
2302        if (txq == NULL)
2303                return -ENOMEM;
2304
2305        /*
2306         * Allocate TX ring hardware descriptors. A memzone large enough to
2307         * handle the maximum ring size is allocated in order to allow for
2308         * resizing in later calls to the queue setup function.
2309         */
2310        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2311                        sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
2312                        TXGBE_ALIGN, socket_id);
2313        if (tz == NULL) {
2314                txgbe_tx_queue_release(txq);
2315                return -ENOMEM;
2316        }
2317
2318        txq->nb_tx_desc = nb_desc;
2319        txq->tx_free_thresh = tx_free_thresh;
2320        txq->pthresh = tx_conf->tx_thresh.pthresh;
2321        txq->hthresh = tx_conf->tx_thresh.hthresh;
2322        txq->wthresh = tx_conf->tx_thresh.wthresh;
2323        txq->queue_id = queue_idx;
2324        txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2325                queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2326        txq->port_id = dev->data->port_id;
2327        txq->offloads = offloads;
2328        txq->ops = &def_txq_ops;
2329        txq->tx_deferred_start = tx_conf->tx_deferred_start;
2330#ifdef RTE_LIB_SECURITY
2331        txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2332                        DEV_TX_OFFLOAD_SECURITY);
2333#endif
2334
2335        /* Use the VF register offsets for the tail and config pointers
2336         * when a virtual function is detected.
2337         */
2338        if (hw->mac.type == txgbe_mac_raptor_vf) {
2339                txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(queue_idx));
2340                txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(queue_idx));
2341        } else {
2342                txq->tdt_reg_addr = TXGBE_REG_ADDR(hw,
2343                                                TXGBE_TXWP(txq->reg_idx));
2344                txq->tdc_reg_addr = TXGBE_REG_ADDR(hw,
2345                                                TXGBE_TXCFG(txq->reg_idx));
2346        }
2347
2348        txq->tx_ring_phys_addr = TMZ_PADDR(tz);
2349        txq->tx_ring = (struct txgbe_tx_desc *)TMZ_VADDR(tz);
2350
2351        /* Allocate software ring */
2352        txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2353                                sizeof(struct txgbe_tx_entry) * nb_desc,
2354                                RTE_CACHE_LINE_SIZE, socket_id);
2355        if (txq->sw_ring == NULL) {
2356                txgbe_tx_queue_release(txq);
2357                return -ENOMEM;
2358        }
2359        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
2360                     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2361
2362        /* set up scalar TX function as appropriate */
2363        txgbe_set_tx_function(dev, txq);
2364
2365        txq->ops->reset(txq);
2366
2367        dev->data->tx_queues[queue_idx] = txq;
2368
2369        return 0;
2370}
2371
2372/**
2373 * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2374 *
2375 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2376 * in the sw_rsc_ring is not set to NULL but rather points to the next
2377 * mbuf of this RSC aggregation (that has not been completed yet and still
2378 * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
2379 * will just free the first "nb_segs" segments of the cluster explicitly by
2380 * calling rte_pktmbuf_free_seg().
2381 *
2382 * @m scattered cluster head
2383 */
2384static void __rte_cold
2385txgbe_free_sc_cluster(struct rte_mbuf *m)
2386{
2387        uint16_t i, nb_segs = m->nb_segs;
2388        struct rte_mbuf *next_seg;
2389
2390        for (i = 0; i < nb_segs; i++) {
2391                next_seg = m->next;
2392                rte_pktmbuf_free_seg(m);
2393                m = next_seg;
2394        }
2395}
2396
2397static void __rte_cold
2398txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)
2399{
2400        unsigned int i;
2401
2402        if (rxq->sw_ring != NULL) {
2403                for (i = 0; i < rxq->nb_rx_desc; i++) {
2404                        if (rxq->sw_ring[i].mbuf != NULL) {
2405                                rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2406                                rxq->sw_ring[i].mbuf = NULL;
2407                        }
2408                }
2409                if (rxq->rx_nb_avail) {
2410                        for (i = 0; i < rxq->rx_nb_avail; ++i) {
2411                                struct rte_mbuf *mb;
2412
2413                                mb = rxq->rx_stage[rxq->rx_next_avail + i];
2414                                rte_pktmbuf_free_seg(mb);
2415                        }
2416                        rxq->rx_nb_avail = 0;
2417                }
2418        }
2419
2420        if (rxq->sw_sc_ring)
2421                for (i = 0; i < rxq->nb_rx_desc; i++)
2422                        if (rxq->sw_sc_ring[i].fbuf) {
2423                                txgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2424                                rxq->sw_sc_ring[i].fbuf = NULL;
2425                        }
2426}
2427
2428static void __rte_cold
2429txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
2430{
2431        if (rxq != NULL) {
2432                txgbe_rx_queue_release_mbufs(rxq);
2433                rte_free(rxq->sw_ring);
2434                rte_free(rxq->sw_sc_ring);
2435                rte_free(rxq);
2436        }
2437}
2438
2439void __rte_cold
2440txgbe_dev_rx_queue_release(void *rxq)
2441{
2442        txgbe_rx_queue_release(rxq);
2443}
2444
2445/*
2446 * Check if Rx Burst Bulk Alloc function can be used.
2447 * Return
2448 *        0: the preconditions are satisfied and the bulk allocation function
2449 *           can be used.
2450 *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2451 *           function must be used.
2452 */
2453static inline int __rte_cold
2454check_rx_burst_bulk_alloc_preconditions(struct txgbe_rx_queue *rxq)
2455{
2456        int ret = 0;
2457
2458        /*
2459         * Make sure the following pre-conditions are satisfied:
2460         *   rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST
2461         *   rxq->rx_free_thresh < rxq->nb_rx_desc
2462         *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2463         * Scattered packets are not supported.  This should be checked
2464         * outside of this function.
2465         */
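            /*
             * Example (illustrative): nb_rx_desc = 512 with rx_free_thresh =
             * 64 satisfies all three conditions, assuming
             * RTE_PMD_TXGBE_RX_MAX_BURST is 32.
             */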
2466        if (!(rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST)) {
2467                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2468                             "rxq->rx_free_thresh=%d, "
2469                             "RTE_PMD_TXGBE_RX_MAX_BURST=%d",
2470                             rxq->rx_free_thresh, RTE_PMD_TXGBE_RX_MAX_BURST);
2471                ret = -EINVAL;
2472        } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2473                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2474                             "rxq->rx_free_thresh=%d, "
2475                             "rxq->nb_rx_desc=%d",
2476                             rxq->rx_free_thresh, rxq->nb_rx_desc);
2477                ret = -EINVAL;
2478        } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2479                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2480                             "rxq->nb_rx_desc=%d, "
2481                             "rxq->rx_free_thresh=%d",
2482                             rxq->nb_rx_desc, rxq->rx_free_thresh);
2483                ret = -EINVAL;
2484        }
2485
2486        return ret;
2487}
2488
2489/* Reset dynamic txgbe_rx_queue fields back to defaults */
2490static void __rte_cold
2491txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
2492{
2493        static const struct txgbe_rx_desc zeroed_desc = {
2494                                                {{0}, {0} }, {{0}, {0} } };
2495        unsigned int i;
2496        uint16_t len = rxq->nb_rx_desc;
2497
2498        /*
2499         * By default, the Rx queue setup function allocates enough memory for
2500         * TXGBE_RING_DESC_MAX.  The Rx Burst bulk allocation function requires
2501         * extra memory at the end of the descriptor ring to be zero'd out.
2502         */
2503        if (adapter->rx_bulk_alloc_allowed)
2504                /* zero out extra memory */
2505                len += RTE_PMD_TXGBE_RX_MAX_BURST;
2506
2507        /*
2508         * Zero out HW ring memory. Zero out extra memory at the end of
2509         * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2510         * reads extra memory as zeros.
2511         */
2512        for (i = 0; i < len; i++)
2513                rxq->rx_ring[i] = zeroed_desc;
2514
2515        /*
2516         * initialize extra software ring entries. Space for these extra
2517         * entries is always allocated
2518         */
2519        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2520        for (i = rxq->nb_rx_desc; i < len; ++i)
2521                rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2522
2523        rxq->rx_nb_avail = 0;
2524        rxq->rx_next_avail = 0;
2525        rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2526        rxq->rx_tail = 0;
2527        rxq->nb_rx_hold = 0;
2528        rxq->pkt_first_seg = NULL;
2529        rxq->pkt_last_seg = NULL;
2530}
2531
2532int __rte_cold
2533txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2534                         uint16_t queue_idx,
2535                         uint16_t nb_desc,
2536                         unsigned int socket_id,
2537                         const struct rte_eth_rxconf *rx_conf,
2538                         struct rte_mempool *mp)
2539{
2540        const struct rte_memzone *rz;
2541        struct txgbe_rx_queue *rxq;
2542        struct txgbe_hw     *hw;
2543        uint16_t len;
2544        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
2545        uint64_t offloads;
2546
2547        PMD_INIT_FUNC_TRACE();
2548        hw = TXGBE_DEV_HW(dev);
2549
2550        offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2551
2552        /*
2553         * Validate number of receive descriptors.
2554         * It must not exceed the hardware maximum and must be a multiple
2555         * of TXGBE_RXD_ALIGN.
2556         */
2557        if (nb_desc % TXGBE_RXD_ALIGN != 0 ||
2558                        nb_desc > TXGBE_RING_DESC_MAX ||
2559                        nb_desc < TXGBE_RING_DESC_MIN) {
2560                return -EINVAL;
2561        }
2562
2563        /* Free memory prior to re-allocation if needed... */
2564        if (dev->data->rx_queues[queue_idx] != NULL) {
2565                txgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2566                dev->data->rx_queues[queue_idx] = NULL;
2567        }
2568
2569        /* First allocate the rx queue data structure */
2570        rxq = rte_zmalloc_socket("ethdev RX queue",
2571                                 sizeof(struct txgbe_rx_queue),
2572                                 RTE_CACHE_LINE_SIZE, socket_id);
2573        if (rxq == NULL)
2574                return -ENOMEM;
2575        rxq->mb_pool = mp;
2576        rxq->nb_rx_desc = nb_desc;
2577        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2578        rxq->queue_id = queue_idx;
2579        rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2580                queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2581        rxq->port_id = dev->data->port_id;
2582        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2583                rxq->crc_len = RTE_ETHER_CRC_LEN;
2584        else
2585                rxq->crc_len = 0;
2586        rxq->drop_en = rx_conf->rx_drop_en;
2587        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2588        rxq->offloads = offloads;
2589
2590        /*
2591         * The packet type field in the RX descriptor differs between NICs,
2592         * so set the mask that matches this NIC.
2593         */
2594        rxq->pkt_type_mask = TXGBE_PTID_MASK;
2595
2596        /*
2597         * Allocate RX ring hardware descriptors. A memzone large enough to
2598         * handle the maximum ring size is allocated in order to allow for
2599         * resizing in later calls to the queue setup function.
2600         */
2601        rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2602                                      RX_RING_SZ, TXGBE_ALIGN, socket_id);
2603        if (rz == NULL) {
2604                txgbe_rx_queue_release(rxq);
2605                return -ENOMEM;
2606        }
2607
2608        /*
2609         * Zero init all the descriptors in the ring.
2610         */
2611        memset(rz->addr, 0, RX_RING_SZ);
2612
2613        /*
2614         * Use the VF receive tail/head register offsets for a Virtual Function.
2615         */
2616        if (hw->mac.type == txgbe_mac_raptor_vf) {
2617                rxq->rdt_reg_addr =
2618                        TXGBE_REG_ADDR(hw, TXGBE_RXWP(queue_idx));
2619                rxq->rdh_reg_addr =
2620                        TXGBE_REG_ADDR(hw, TXGBE_RXRP(queue_idx));
2621        } else {
2622                rxq->rdt_reg_addr =
2623                        TXGBE_REG_ADDR(hw, TXGBE_RXWP(rxq->reg_idx));
2624                rxq->rdh_reg_addr =
2625                        TXGBE_REG_ADDR(hw, TXGBE_RXRP(rxq->reg_idx));
2626        }
2627
2628        rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
2629        rxq->rx_ring = (struct txgbe_rx_desc *)TMZ_VADDR(rz);
2630
2631        /*
2632         * Certain constraints must be met in order to use the bulk buffer
2633         * allocation Rx burst function. If any Rx queue doesn't meet them,
2634         * the feature is disabled for the whole port.
2635         */
2636        if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2637                PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2638                                    "preconditions - canceling the feature for "
2639                                    "the whole port[%d]",
2640                             rxq->queue_id, rxq->port_id);
2641                adapter->rx_bulk_alloc_allowed = false;
2642        }
2643
2644        /*
2645         * Allocate software ring. Allow for space at the end of the
2646         * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2647         * function does not access an invalid memory region.
2648         */
2649        len = nb_desc;
2650        if (adapter->rx_bulk_alloc_allowed)
2651                len += RTE_PMD_TXGBE_RX_MAX_BURST;
2652
2653        rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2654                                          sizeof(struct txgbe_rx_entry) * len,
2655                                          RTE_CACHE_LINE_SIZE, socket_id);
2656        if (!rxq->sw_ring) {
2657                txgbe_rx_queue_release(rxq);
2658                return -ENOMEM;
2659        }
2660
2661        /*
2662         * Always allocate this ring, even if it's not going to be needed,
2663         * in order to simplify the code.
2664         *
2665         * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2666         * be requested in txgbe_dev_rx_init(), which is called later from
2667         * dev_start() flow.
2668         */
2669        rxq->sw_sc_ring =
2670                rte_zmalloc_socket("rxq->sw_sc_ring",
2671                                  sizeof(struct txgbe_scattered_rx_entry) * len,
2672                                  RTE_CACHE_LINE_SIZE, socket_id);
2673        if (!rxq->sw_sc_ring) {
2674                txgbe_rx_queue_release(rxq);
2675                return -ENOMEM;
2676        }
2677
2678        PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2679                            "dma_addr=0x%" PRIx64,
2680                     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2681                     rxq->rx_ring_phys_addr);
2682
2683        dev->data->rx_queues[queue_idx] = rxq;
2684
2685        txgbe_reset_rx_queue(adapter, rxq);
2686
2687        return 0;
2688}
2689
2690uint32_t
2691txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2692{
2693#define TXGBE_RXQ_SCAN_INTERVAL 4
2694        volatile struct txgbe_rx_desc *rxdp;
2695        struct txgbe_rx_queue *rxq;
2696        uint32_t desc = 0;
2697
2698        rxq = dev->data->rx_queues[rx_queue_id];
2699        rxdp = &rxq->rx_ring[rxq->rx_tail];
2700
2701        while ((desc < rxq->nb_rx_desc) &&
2702                (rxdp->qw1.lo.status &
2703                        rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) {
2704                desc += TXGBE_RXQ_SCAN_INTERVAL;
2705                rxdp += TXGBE_RXQ_SCAN_INTERVAL;
2706                if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2707                        rxdp = &(rxq->rx_ring[rxq->rx_tail +
2708                                desc - rxq->nb_rx_desc]);
2709        }
2710
2711        return desc;
2712}
2713
2714int
2715txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
2716{
2717        struct txgbe_rx_queue *rxq = rx_queue;
2718        volatile uint32_t *status;
2719        uint32_t nb_hold, desc;
2720
2721        if (unlikely(offset >= rxq->nb_rx_desc))
2722                return -EINVAL;
2723
2724        nb_hold = rxq->nb_rx_hold;
2725        if (offset >= rxq->nb_rx_desc - nb_hold)
2726                return RTE_ETH_RX_DESC_UNAVAIL;
2727
2728        desc = rxq->rx_tail + offset;
2729        if (desc >= rxq->nb_rx_desc)
2730                desc -= rxq->nb_rx_desc;
2731
2732        status = &rxq->rx_ring[desc].qw1.lo.status;
2733        if (*status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))
2734                return RTE_ETH_RX_DESC_DONE;
2735
2736        return RTE_ETH_RX_DESC_AVAIL;
2737}
2738
2739int
2740txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
2741{
2742        struct txgbe_tx_queue *txq = tx_queue;
2743        volatile uint32_t *status;
2744        uint32_t desc;
2745
2746        if (unlikely(offset >= txq->nb_tx_desc))
2747                return -EINVAL;
2748
2749        desc = txq->tx_tail + offset;
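        /* Wrap the descriptor index back into the ring if it runs past the end */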
2750        if (desc >= txq->nb_tx_desc) {
2751                desc -= txq->nb_tx_desc;
2752                if (desc >= txq->nb_tx_desc)
2753                        desc -= txq->nb_tx_desc;
2754        }
2755
2756        status = &txq->tx_ring[desc].dw3;
2757        if (*status & rte_cpu_to_le_32(TXGBE_TXD_DD))
2758                return RTE_ETH_TX_DESC_DONE;
2759
2760        return RTE_ETH_TX_DESC_FULL;
2761}
2762
2763void __rte_cold
2764txgbe_dev_clear_queues(struct rte_eth_dev *dev)
2765{
2766        unsigned int i;
2767        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
2768
2769        PMD_INIT_FUNC_TRACE();
2770
2771        for (i = 0; i < dev->data->nb_tx_queues; i++) {
2772                struct txgbe_tx_queue *txq = dev->data->tx_queues[i];
2773
2774                if (txq != NULL) {
2775                        txq->ops->release_mbufs(txq);
2776                        txq->ops->reset(txq);
2777                }
2778        }
2779
2780        for (i = 0; i < dev->data->nb_rx_queues; i++) {
2781                struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
2782
2783                if (rxq != NULL) {
2784                        txgbe_rx_queue_release_mbufs(rxq);
2785                        txgbe_reset_rx_queue(adapter, rxq);
2786                }
2787        }
2788}
2789
2790void
2791txgbe_dev_free_queues(struct rte_eth_dev *dev)
2792{
2793        unsigned int i;
2794
2795        PMD_INIT_FUNC_TRACE();
2796
2797        for (i = 0; i < dev->data->nb_rx_queues; i++) {
2798                txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
2799                dev->data->rx_queues[i] = NULL;
2800        }
2801        dev->data->nb_rx_queues = 0;
2802
2803        for (i = 0; i < dev->data->nb_tx_queues; i++) {
2804                txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
2805                dev->data->tx_queues[i] = NULL;
2806        }
2807        dev->data->nb_tx_queues = 0;
2808}
2809
2810/**
2811 * Receive Side Scaling (RSS)
2812 *
2813 * Principles:
2814 * The source and destination IP addresses of the IP header and the source
2815 * and destination ports of TCP/UDP headers, if any, of received packets are
2816 * hashed against a configurable random key to compute a 32-bit RSS hash result.
2817 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2818 * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
2819 * RSS output index which is used as the RX queue index where to store the
2820 * received packets.
2821 * The following output is supplied in the RX write-back descriptor:
2822 *     - 32-bit result of the Microsoft RSS hash function,
2823 *     - 4-bit RSS type field.
2824 */
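/*
 * Illustration only (not part of the datapath): with the 128-entry RETA the
 * destination queue is effectively
 *     queue = reta[rss_hash & 0x7F];
 * where rss_hash is the 32-bit hash reported in the write-back descriptor.
 */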
2825
2826/*
2827 * Used as the default key.
2828 */
2829static uint8_t rss_intel_key[40] = {
2830        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2831        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2832        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2833        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2834        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2835};
2836
2837static void
2838txgbe_rss_disable(struct rte_eth_dev *dev)
2839{
2840        struct txgbe_hw *hw;
2841
2842        hw = TXGBE_DEV_HW(dev);
2843        if (hw->mac.type == txgbe_mac_raptor_vf)
2844                wr32m(hw, TXGBE_VFPLCFG, TXGBE_VFPLCFG_RSSENA, 0);
2845        else
2846                wr32m(hw, TXGBE_RACTL, TXGBE_RACTL_RSSENA, 0);
2847}
2848
2849int
2850txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2851                          struct rte_eth_rss_conf *rss_conf)
2852{
2853        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2854        uint8_t  *hash_key;
2855        uint32_t mrqc;
2856        uint32_t rss_key;
2857        uint64_t rss_hf;
2858        uint16_t i;
2859
2860        if (!txgbe_rss_update_sp(hw->mac.type)) {
2861                PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
2862                        "NIC.");
2863                return -ENOTSUP;
2864        }
2865
2866        hash_key = rss_conf->rss_key;
2867        if (hash_key) {
2868                /* Fill in RSS hash key */
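                /* Each 32-bit RSSKEY register packs four key bytes, lowest byte first */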
2869                for (i = 0; i < 10; i++) {
2870                        rss_key  = LS32(hash_key[(i * 4) + 0], 0, 0xFF);
2871                        rss_key |= LS32(hash_key[(i * 4) + 1], 8, 0xFF);
2872                        rss_key |= LS32(hash_key[(i * 4) + 2], 16, 0xFF);
2873                        rss_key |= LS32(hash_key[(i * 4) + 3], 24, 0xFF);
2874                        wr32at(hw, TXGBE_REG_RSSKEY, i, rss_key);
2875                }
2876        }
2877
2878        /* Set configured hashing protocols */
2879        rss_hf = rss_conf->rss_hf & TXGBE_RSS_OFFLOAD_ALL;
2880        if (hw->mac.type == txgbe_mac_raptor_vf) {
2881                mrqc = rd32(hw, TXGBE_VFPLCFG);
2882                mrqc &= ~TXGBE_VFPLCFG_RSSMASK;
2883                if (rss_hf & ETH_RSS_IPV4)
2884                        mrqc |= TXGBE_VFPLCFG_RSSIPV4;
2885                if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2886                        mrqc |= TXGBE_VFPLCFG_RSSIPV4TCP;
2887                if (rss_hf & ETH_RSS_IPV6 ||
2888                    rss_hf & ETH_RSS_IPV6_EX)
2889                        mrqc |= TXGBE_VFPLCFG_RSSIPV6;
2890                if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
2891                    rss_hf & ETH_RSS_IPV6_TCP_EX)
2892                        mrqc |= TXGBE_VFPLCFG_RSSIPV6TCP;
2893                if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2894                        mrqc |= TXGBE_VFPLCFG_RSSIPV4UDP;
2895                if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
2896                    rss_hf & ETH_RSS_IPV6_UDP_EX)
2897                        mrqc |= TXGBE_VFPLCFG_RSSIPV6UDP;
2898
2899                if (rss_hf)
2900                        mrqc |= TXGBE_VFPLCFG_RSSENA;
2901                else
2902                        mrqc &= ~TXGBE_VFPLCFG_RSSENA;
2903
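                /*
                 * The RSSHASH field presumably selects how many RSS queues
                 * the VF pool uses: 4 queues when more than 3 Rx queues are
                 * configured, 2 queues when more than 1.
                 */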
2904                if (dev->data->nb_rx_queues > 3)
2905                        mrqc |= TXGBE_VFPLCFG_RSSHASH(2);
2906                else if (dev->data->nb_rx_queues > 1)
2907                        mrqc |= TXGBE_VFPLCFG_RSSHASH(1);
2908
2909                wr32(hw, TXGBE_VFPLCFG, mrqc);
2910        } else {
2911                mrqc = rd32(hw, TXGBE_RACTL);
2912                mrqc &= ~TXGBE_RACTL_RSSMASK;
2913                if (rss_hf & ETH_RSS_IPV4)
2914                        mrqc |= TXGBE_RACTL_RSSIPV4;
2915                if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2916                        mrqc |= TXGBE_RACTL_RSSIPV4TCP;
2917                if (rss_hf & ETH_RSS_IPV6 ||
2918                    rss_hf & ETH_RSS_IPV6_EX)
2919                        mrqc |= TXGBE_RACTL_RSSIPV6;
2920                if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
2921                    rss_hf & ETH_RSS_IPV6_TCP_EX)
2922                        mrqc |= TXGBE_RACTL_RSSIPV6TCP;
2923                if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2924                        mrqc |= TXGBE_RACTL_RSSIPV4UDP;
2925                if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
2926                    rss_hf & ETH_RSS_IPV6_UDP_EX)
2927                        mrqc |= TXGBE_RACTL_RSSIPV6UDP;
2928
2929                if (rss_hf)
2930                        mrqc |= TXGBE_RACTL_RSSENA;
2931                else
2932                        mrqc &= ~TXGBE_RACTL_RSSENA;
2933
2934                wr32(hw, TXGBE_RACTL, mrqc);
2935        }
2936
2937        return 0;
2938}
2939
2940int
2941txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2942                            struct rte_eth_rss_conf *rss_conf)
2943{
2944        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2945        uint8_t *hash_key;
2946        uint32_t mrqc;
2947        uint32_t rss_key;
2948        uint64_t rss_hf;
2949        uint16_t i;
2950
2951        hash_key = rss_conf->rss_key;
2952        if (hash_key) {
2953                /* Return RSS hash key */
2954                for (i = 0; i < 10; i++) {
2955                        rss_key = rd32at(hw, TXGBE_REG_RSSKEY, i);
2956                        hash_key[(i * 4) + 0] = RS32(rss_key, 0, 0xFF);
2957                        hash_key[(i * 4) + 1] = RS32(rss_key, 8, 0xFF);
2958                        hash_key[(i * 4) + 2] = RS32(rss_key, 16, 0xFF);
2959                        hash_key[(i * 4) + 3] = RS32(rss_key, 24, 0xFF);
2960                }
2961        }
2962
2963        rss_hf = 0;
2964        if (hw->mac.type == txgbe_mac_raptor_vf) {
2965                mrqc = rd32(hw, TXGBE_VFPLCFG);
2966                if (mrqc & TXGBE_VFPLCFG_RSSIPV4)
2967                        rss_hf |= ETH_RSS_IPV4;
2968                if (mrqc & TXGBE_VFPLCFG_RSSIPV4TCP)
2969                        rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2970                if (mrqc & TXGBE_VFPLCFG_RSSIPV6)
2971                        rss_hf |= ETH_RSS_IPV6 |
2972                                  ETH_RSS_IPV6_EX;
2973                if (mrqc & TXGBE_VFPLCFG_RSSIPV6TCP)
2974                        rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
2975                                  ETH_RSS_IPV6_TCP_EX;
2976                if (mrqc & TXGBE_VFPLCFG_RSSIPV4UDP)
2977                        rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2978                if (mrqc & TXGBE_VFPLCFG_RSSIPV6UDP)
2979                        rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
2980                                  ETH_RSS_IPV6_UDP_EX;
2981                if (!(mrqc & TXGBE_VFPLCFG_RSSENA))
2982                        rss_hf = 0;
2983        } else {
2984                mrqc = rd32(hw, TXGBE_RACTL);
2985                if (mrqc & TXGBE_RACTL_RSSIPV4)
2986                        rss_hf |= ETH_RSS_IPV4;
2987                if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
2988                        rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2989                if (mrqc & TXGBE_RACTL_RSSIPV6)
2990                        rss_hf |= ETH_RSS_IPV6 |
2991                                  ETH_RSS_IPV6_EX;
2992                if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
2993                        rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
2994                                  ETH_RSS_IPV6_TCP_EX;
2995                if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
2996                        rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2997                if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
2998                        rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
2999                                  ETH_RSS_IPV6_UDP_EX;
3000                if (!(mrqc & TXGBE_RACTL_RSSENA))
3001                        rss_hf = 0;
3002        }
3003
3004        rss_hf &= TXGBE_RSS_OFFLOAD_ALL;
3005
3006        rss_conf->rss_hf = rss_hf;
3007        return 0;
3008}
3009
3010static void
3011txgbe_rss_configure(struct rte_eth_dev *dev)
3012{
3013        struct rte_eth_rss_conf rss_conf;
3014        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3015        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3016        uint32_t reta;
3017        uint16_t i;
3018        uint16_t j;
3019
3020        PMD_INIT_FUNC_TRACE();
3021
3022        /*
3023         * Fill in redirection table
3024         * The byte-swap is needed because NIC registers are in
3025         * little-endian order.
3026         */
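        /*
         * Each 32-bit RSSTBL register packs four one-byte RETA entries;
         * new entries are shifted in from the top byte and the register is
         * written once every fourth iteration.
         */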
3027        if (adapter->rss_reta_updated == 0) {
3028                reta = 0;
3029                for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
3030                        if (j == dev->data->nb_rx_queues)
3031                                j = 0;
3032                        reta = (reta >> 8) | LS32(j, 24, 0xFF);
3033                        if ((i & 3) == 3)
3034                                wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3035                }
3036        }
3037        /*
3038         * Configure the RSS key and the RSS protocols used to compute
3039         * the RSS hash of input packets.
3040         */
3041        rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3042        if (rss_conf.rss_key == NULL)
3043                rss_conf.rss_key = rss_intel_key; /* Default hash key */
3044        txgbe_dev_rss_hash_update(dev, &rss_conf);
3045}
3046
3047#define NUM_VFTA_REGISTERS 128
3048#define NIC_RX_BUFFER_SIZE 0x200
3049
3050static void
3051txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3052{
3053        struct rte_eth_vmdq_dcb_conf *cfg;
3054        struct txgbe_hw *hw;
3055        enum rte_eth_nb_pools num_pools;
3056        uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3057        uint16_t pbsize;
3058        uint8_t nb_tcs; /* number of traffic classes */
3059        int i;
3060
3061        PMD_INIT_FUNC_TRACE();
3062        hw = TXGBE_DEV_HW(dev);
3063        cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3064        num_pools = cfg->nb_queue_pools;
3065        /* Check we have a valid number of pools */
3066        if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3067                txgbe_rss_disable(dev);
3068                return;
3069        }
3070        /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3071        nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3072
3073        /*
3074         * split rx buffer up into sections, each for 1 traffic class
3075         */
3076        pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3077        for (i = 0; i < nb_tcs; i++) {
3078                uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
3079
3080                rxpbsize &= (~(0x3FF << 10));
3081                /* clear 10 bits. */
3082                rxpbsize |= (pbsize << 10); /* set value */
3083                wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
3084        }
3085        /* zero alloc all unused TCs */
3086        for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3087                uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
3088
3089                rxpbsize &= (~(0x3FF << 10));
3090                /* clear 10 bits. */
3091                wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
3092        }
3093
3094        if (num_pools == ETH_16_POOLS) {
3095                mrqc = TXGBE_PORTCTL_NUMTC_8;
3096                mrqc |= TXGBE_PORTCTL_NUMVT_16;
3097        } else {
3098                mrqc = TXGBE_PORTCTL_NUMTC_4;
3099                mrqc |= TXGBE_PORTCTL_NUMVT_32;
3100        }
3101        wr32m(hw, TXGBE_PORTCTL,
3102              TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK, mrqc);
3103
3104        vt_ctl = TXGBE_POOLCTL_RPLEN;
3105        if (cfg->enable_default_pool)
3106                vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
3107        else
3108                vt_ctl |= TXGBE_POOLCTL_DEFDSA;
3109
3110        wr32(hw, TXGBE_POOLCTL, vt_ctl);
3111
3112        queue_mapping = 0;
3113        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3114                /*
3115                 * mapping is done with 3 bits per priority,
3116                 * so shift by i*3 each time
3117                 */
3118                queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3119
3120        wr32(hw, TXGBE_RPUP2TC, queue_mapping);
3121
3122        wr32(hw, TXGBE_ARBRXCTL, TXGBE_ARBRXCTL_RRM);
3123
3124        /* enable vlan filtering and allow all vlan tags through */
3125        vlanctrl = rd32(hw, TXGBE_VLANCTL);
3126        vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
3127        wr32(hw, TXGBE_VLANCTL, vlanctrl);
3128
3129        /* enable all vlan filters */
3130        for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3131                wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
3132
3133        wr32(hw, TXGBE_POOLRXENA(0),
3134                        num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3135
3136        wr32(hw, TXGBE_ETHADDRIDX, 0);
3137        wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
3138        wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
3139
3140        /* set up filters for vlan tags as configured */
3141        for (i = 0; i < cfg->nb_pool_maps; i++) {
3142                /* set vlan id in VF register and set the valid bit */
3143                wr32(hw, TXGBE_PSRVLANIDX, i);
3144                wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
3145                                (cfg->pool_map[i].vlan_id & 0xFFF)));
3146
3147                wr32(hw, TXGBE_PSRVLANPLM(0), cfg->pool_map[i].pools);
3148        }
3149}
3150
3151/**
3152 * txgbe_dcb_tx_hw_config - Configure general DCB TX parameters
3153 * @dev: pointer to eth_dev structure
3154 * @dcb_config: pointer to txgbe_dcb_config structure
3155 */
3156static void
3157txgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3158                       struct txgbe_dcb_config *dcb_config)
3159{
3160        uint32_t reg;
3161        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3162
3163        PMD_INIT_FUNC_TRACE();
3164
3165        /* Disable the Tx desc arbiter */
3166        reg = rd32(hw, TXGBE_ARBTXCTL);
3167        reg |= TXGBE_ARBTXCTL_DIA;
3168        wr32(hw, TXGBE_ARBTXCTL, reg);
3169
3170        /* Enable DCB for Tx and set the number of TCs */
3171        reg = rd32(hw, TXGBE_PORTCTL);
3172        reg &= ~TXGBE_PORTCTL_NUMTC_MASK;
3173        reg |= TXGBE_PORTCTL_DCB;
3174        if (dcb_config->num_tcs.pg_tcs == 8)
3175                reg |= TXGBE_PORTCTL_NUMTC_8;
3176        else
3177                reg |= TXGBE_PORTCTL_NUMTC_4;
3178
3179        wr32(hw, TXGBE_PORTCTL, reg);
3180
3181        /* Enable the Tx desc arbiter */
3182        reg = rd32(hw, TXGBE_ARBTXCTL);
3183        reg &= ~TXGBE_ARBTXCTL_DIA;
3184        wr32(hw, TXGBE_ARBTXCTL, reg);
3185}
3186
3187/**
3188 * txgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3189 * @dev: pointer to rte_eth_dev structure
3190 * @dcb_config: pointer to txgbe_dcb_config structure
3191 */
3192static void
3193txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3194                        struct txgbe_dcb_config *dcb_config)
3195{
3196        struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3197                        &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3198        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3199
3200        PMD_INIT_FUNC_TRACE();
3201        /*PF VF Transmit Enable*/
3202        wr32(hw, TXGBE_POOLTXENA(0),
3203                vmdq_tx_conf->nb_queue_pools ==
3204                                ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3205
3206        /*Configure general DCB TX parameters*/
3207        txgbe_dcb_tx_hw_config(dev, dcb_config);
3208}
3209
3210static void
3211txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3212                        struct txgbe_dcb_config *dcb_config)
3213{
3214        struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3215                        &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3216        struct txgbe_dcb_tc_config *tc;
3217        uint8_t i, j;
3218
3219        /* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
3220        if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3221                dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3222                dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3223        } else {
3224                dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3225                dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3226        }
3227
3228        /* Initialize User Priority to Traffic Class mapping */
3229        for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
3230                tc = &dcb_config->tc_config[j];
3231                tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3232        }
3233
3234        /* User Priority to Traffic Class mapping */
3235        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3236                j = vmdq_rx_conf->dcb_tc[i];
3237                tc = &dcb_config->tc_config[j];
3238                tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3239                                                (uint8_t)(1 << i);
3240        }
3241}
3242
3243static void
3244txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3245                        struct txgbe_dcb_config *dcb_config)
3246{
3247        struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3248                        &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3249        struct txgbe_dcb_tc_config *tc;
3250        uint8_t i, j;
3251
3252        /* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
3253        if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3254                dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3255                dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3256        } else {
3257                dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3258                dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3259        }
3260
3261        /* Initialize User Priority to Traffic Class mapping */
3262        for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
3263                tc = &dcb_config->tc_config[j];
3264                tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3265        }
3266
3267        /* User Priority to Traffic Class mapping */
3268        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3269                j = vmdq_tx_conf->dcb_tc[i];
3270                tc = &dcb_config->tc_config[j];
3271                tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3272                                                (uint8_t)(1 << i);
3273        }
3274}
3275
3276static void
3277txgbe_dcb_rx_config(struct rte_eth_dev *dev,
3278                struct txgbe_dcb_config *dcb_config)
3279{
3280        struct rte_eth_dcb_rx_conf *rx_conf =
3281                        &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3282        struct txgbe_dcb_tc_config *tc;
3283        uint8_t i, j;
3284
3285        dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3286        dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3287
3288        /* Initialize User Priority to Traffic Class mapping */
3289        for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
3290                tc = &dcb_config->tc_config[j];
3291                tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3292        }
3293
3294        /* User Priority to Traffic Class mapping */
3295        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3296                j = rx_conf->dcb_tc[i];
3297                tc = &dcb_config->tc_config[j];
3298                tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3299                                                (uint8_t)(1 << i);
3300        }
3301}
3302
3303static void
3304txgbe_dcb_tx_config(struct rte_eth_dev *dev,
3305                struct txgbe_dcb_config *dcb_config)
3306{
3307        struct rte_eth_dcb_tx_conf *tx_conf =
3308                        &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3309        struct txgbe_dcb_tc_config *tc;
3310        uint8_t i, j;
3311
3312        dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3313        dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3314
3315        /* Initialize User Priority to Traffic Class mapping */
3316        for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
3317                tc = &dcb_config->tc_config[j];
3318                tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3319        }
3320
3321        /* User Priority to Traffic Class mapping */
3322        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3323                j = tx_conf->dcb_tc[i];
3324                tc = &dcb_config->tc_config[j];
3325                tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3326                                                (uint8_t)(1 << i);
3327        }
3328}
3329
3330/**
3331 * txgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3332 * @dev: pointer to eth_dev structure
3333 * @dcb_config: pointer to txgbe_dcb_config structure
3334 */
3335static void
3336txgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3337                       struct txgbe_dcb_config *dcb_config)
3338{
3339        uint32_t reg;
3340        uint32_t vlanctrl;
3341        uint8_t i;
3342        uint32_t q;
3343        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3344
3345        PMD_INIT_FUNC_TRACE();
3346        /*
3347         * Disable the arbiter before changing parameters
3348         * (always enable recycle mode; WSP)
3349         */
3350        reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP | TXGBE_ARBRXCTL_DIA;
3351        wr32(hw, TXGBE_ARBRXCTL, reg);
3352
3353        reg = rd32(hw, TXGBE_PORTCTL);
3354        reg &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
3355        if (dcb_config->num_tcs.pg_tcs == 4) {
3356                reg |= TXGBE_PORTCTL_NUMTC_4;
3357                if (dcb_config->vt_mode)
3358                        reg |= TXGBE_PORTCTL_NUMVT_32;
3359                else
3360                        wr32(hw, TXGBE_POOLCTL, 0);
3361        }
3362
3363        if (dcb_config->num_tcs.pg_tcs == 8) {
3364                reg |= TXGBE_PORTCTL_NUMTC_8;
3365                if (dcb_config->vt_mode)
3366                        reg |= TXGBE_PORTCTL_NUMVT_16;
3367                else
3368                        wr32(hw, TXGBE_POOLCTL, 0);
3369        }
3370
3371        wr32(hw, TXGBE_PORTCTL, reg);
3372
3373        if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3374                /* Enable drop for all queues in VMDQ (non-SRIOV) mode */
3375                for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
3376                        u32 val = 1 << (q % 32);
3377                        wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
3378                }
3379        } else {
3380                /* Enable drop for all queues in SRIOV mode */
3381                for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
3382                        u32 val = 1 << (q % 32);
3383                        wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
3384                }
3385        }
3386
3387        /* VLNCTL: enable vlan filtering and allow all vlan tags through */
3388        vlanctrl = rd32(hw, TXGBE_VLANCTL);
3389        vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
3390        wr32(hw, TXGBE_VLANCTL, vlanctrl);
3391
3392        /* VLANTBL - enable all vlan filters */
3393        for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3394                wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
3395
3396        /*
3397         * Configure Rx packet plane (recycle mode; WSP) and
3398         * enable arbiter
3399         */
3400        reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP;
3401        wr32(hw, TXGBE_ARBRXCTL, reg);
3402}
3403
3404static void
3405txgbe_dcb_hw_arbite_rx_config(struct txgbe_hw *hw, uint16_t *refill,
3406                uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3407{
3408        txgbe_dcb_config_rx_arbiter_raptor(hw, refill, max, bwg_id,
3409                                          tsa, map);
3410}
3411
3412static void
3413txgbe_dcb_hw_arbite_tx_config(struct txgbe_hw *hw, uint16_t *refill,
3414                uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3415{
3416        switch (hw->mac.type) {
3417        case txgbe_mac_raptor:
3418                txgbe_dcb_config_tx_desc_arbiter_raptor(hw, refill,
3419                                                        max, bwg_id, tsa);
3420                txgbe_dcb_config_tx_data_arbiter_raptor(hw, refill,
3421                                                        max, bwg_id, tsa, map);
3422                break;
3423        default:
3424                break;
3425        }
3426}
3427
3428#define DCB_RX_CONFIG  1
3429#define DCB_TX_CONFIG  1
3430#define DCB_TX_PB      1024
3431/**
3432 * txgbe_dcb_hw_configure - Enable DCB and configure
3433 * general DCB in VT mode and non-VT mode parameters
3434 * @dev: pointer to rte_eth_dev structure
3435 * @dcb_config: pointer to txgbe_dcb_config structure
3436 */
3437static int
3438txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3439                        struct txgbe_dcb_config *dcb_config)
3440{
3441        int     ret = 0;
3442        uint8_t i, pfc_en, nb_tcs;
3443        uint16_t pbsize, rx_buffer_size;
3444        uint8_t config_dcb_rx = 0;
3445        uint8_t config_dcb_tx = 0;
3446        uint8_t tsa[TXGBE_DCB_TC_MAX] = {0};
3447        uint8_t bwgid[TXGBE_DCB_TC_MAX] = {0};
3448        uint16_t refill[TXGBE_DCB_TC_MAX] = {0};
3449        uint16_t max[TXGBE_DCB_TC_MAX] = {0};
3450        uint8_t map[TXGBE_DCB_TC_MAX] = {0};
3451        struct txgbe_dcb_tc_config *tc;
3452        uint32_t max_frame = dev->data->mtu +
3453                        RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3454        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3455        struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
3456
3457        switch (dev->data->dev_conf.rxmode.mq_mode) {
3458        case ETH_MQ_RX_VMDQ_DCB:
3459                dcb_config->vt_mode = true;
3460                config_dcb_rx = DCB_RX_CONFIG;
3461                /*
3462                 * get dcb and VT rx configuration parameters
3463                 * from rte_eth_conf
3464                 */
3465                txgbe_vmdq_dcb_rx_config(dev, dcb_config);
3466                /*Configure general VMDQ and DCB RX parameters*/
3467                txgbe_vmdq_dcb_configure(dev);
3468                break;
3469        case ETH_MQ_RX_DCB:
3470        case ETH_MQ_RX_DCB_RSS:
3471                dcb_config->vt_mode = false;
3472                config_dcb_rx = DCB_RX_CONFIG;
3473                /* Get dcb RX configuration parameters from rte_eth_conf */
3474                txgbe_dcb_rx_config(dev, dcb_config);
3475                /*Configure general DCB RX parameters*/
3476                txgbe_dcb_rx_hw_config(dev, dcb_config);
3477                break;
3478        default:
3479                PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3480                break;
3481        }
3482        switch (dev->data->dev_conf.txmode.mq_mode) {
3483        case ETH_MQ_TX_VMDQ_DCB:
3484                dcb_config->vt_mode = true;
3485                config_dcb_tx = DCB_TX_CONFIG;
3486                /* get DCB and VT TX configuration parameters
3487                 * from rte_eth_conf
3488                 */
3489                txgbe_dcb_vt_tx_config(dev, dcb_config);
3490                /* Configure general VMDQ and DCB TX parameters */
3491                txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
3492                break;
3493
3494        case ETH_MQ_TX_DCB:
3495                dcb_config->vt_mode = false;
3496                config_dcb_tx = DCB_TX_CONFIG;
3497                /* get DCB TX configuration parameters from rte_eth_conf */
3498                txgbe_dcb_tx_config(dev, dcb_config);
3499                /* Configure general DCB TX parameters */
3500                txgbe_dcb_tx_hw_config(dev, dcb_config);
3501                break;
3502        default:
3503                PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3504                break;
3505        }
3506
3507        nb_tcs = dcb_config->num_tcs.pfc_tcs;
3508        /* Unpack map */
3509        txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3510        if (nb_tcs == ETH_4_TCS) {
3511                /* Avoid un-configured priority mapping to TC0 */
3512                uint8_t j = 4;
3513                uint8_t mask = 0xFF;
3514
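                /*
                 * 'mask' keeps a bit set for every TC that none of the first
                 * four user priorities maps to; the remaining priorities are
                 * spread across those unused TCs instead of all falling back
                 * to TC0.
                 */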
3515                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3516                        mask = (uint8_t)(mask & (~(1 << map[i])));
3517                for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
3518                        if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
3519                                map[j++] = i;
3520                        mask >>= 1;
3521                }
3522                /* Re-configure 4 TCs BW */
3523                for (i = 0; i < nb_tcs; i++) {
3524                        tc = &dcb_config->tc_config[i];
3525                        if (bw_conf->tc_num != nb_tcs)
3526                                tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
3527                                        (uint8_t)(100 / nb_tcs);
3528                        tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
3529                                                (uint8_t)(100 / nb_tcs);
3530                }
3531                for (; i < TXGBE_DCB_TC_MAX; i++) {
3532                        tc = &dcb_config->tc_config[i];
3533                        tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3534                        tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3535                }
3536        } else {
3537                /* Re-configure 8 TCs BW */
3538                for (i = 0; i < nb_tcs; i++) {
3539                        tc = &dcb_config->tc_config[i];
3540                        if (bw_conf->tc_num != nb_tcs)
3541                                tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
3542                                        (uint8_t)(100 / nb_tcs + (i & 1));
3543                        tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
3544                                (uint8_t)(100 / nb_tcs + (i & 1));
3545                }
3546        }
3547
3548        rx_buffer_size = NIC_RX_BUFFER_SIZE;
3549
3550        if (config_dcb_rx) {
3551                /* Set RX buffer size */
3552                pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3553                uint32_t rxpbsize = pbsize << 10;
3554
3555                for (i = 0; i < nb_tcs; i++)
3556                        wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
3557
3558                /* zero alloc all unused TCs */
3559                for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3560                        wr32(hw, TXGBE_PBRXSIZE(i), 0);
3561        }
3562        if (config_dcb_tx) {
3563                /* Only support an equally distributed
3564                 *  Tx packet buffer strategy.
3565                 */
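                /*
                 * txpktsize is the per-TC share of the Tx packet buffer; the
                 * DMA threshold is expressed in DCB_TX_PB (1 KB) units, with
                 * TXGBE_TXPKT_SIZE_MAX units subtracted, presumably as head
                 * room for a maximum-size packet.
                 */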
3566                uint32_t txpktsize = TXGBE_PBTXSIZE_MAX / nb_tcs;
3567                uint32_t txpbthresh = (txpktsize / DCB_TX_PB) -
3568                                        TXGBE_TXPKT_SIZE_MAX;
3569
3570                for (i = 0; i < nb_tcs; i++) {
3571                        wr32(hw, TXGBE_PBTXSIZE(i), txpktsize);
3572                        wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
3573                }
3574                /* Clear unused TCs, if any, to zero buffer size*/
3575                for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3576                        wr32(hw, TXGBE_PBTXSIZE(i), 0);
3577                        wr32(hw, TXGBE_PBTXDMATH(i), 0);
3578                }
3579        }
3580
3581        /* Calculate traffic class credits */
3582        txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
3583                                TXGBE_DCB_TX_CONFIG);
3584        txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
3585                                TXGBE_DCB_RX_CONFIG);
3586
3587        if (config_dcb_rx) {
3588                /* Unpack CEE standard containers */
3589                txgbe_dcb_unpack_refill_cee(dcb_config,
3590                                TXGBE_DCB_RX_CONFIG, refill);
3591                txgbe_dcb_unpack_max_cee(dcb_config, max);
3592                txgbe_dcb_unpack_bwgid_cee(dcb_config,
3593                                TXGBE_DCB_RX_CONFIG, bwgid);
3594                txgbe_dcb_unpack_tsa_cee(dcb_config,
3595                                TXGBE_DCB_RX_CONFIG, tsa);
3596                /* Configure PG(ETS) RX */
3597                txgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
3598        }
3599
3600        if (config_dcb_tx) {
3601                /* Unpack CEE standard containers */
3602                txgbe_dcb_unpack_refill_cee(dcb_config,
3603                                TXGBE_DCB_TX_CONFIG, refill);
3604                txgbe_dcb_unpack_max_cee(dcb_config, max);
3605                txgbe_dcb_unpack_bwgid_cee(dcb_config,
3606                                TXGBE_DCB_TX_CONFIG, bwgid);
3607                txgbe_dcb_unpack_tsa_cee(dcb_config,
3608                                TXGBE_DCB_TX_CONFIG, tsa);
3609                /* Configure PG(ETS) TX */
3610                txgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
3611        }
3612
3613        /* Configure queue statistics registers */
3614        txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
3615
3616        /* Check if the PFC is supported */
3617        if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
3618                pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3619                for (i = 0; i < nb_tcs; i++) {
3620                        /* If the TC count is 8,
3621                         * and the default high_water is 48,
3622                         * the low_water is 16 as default.
3623                         */
3624                        hw->fc.high_water[i] = (pbsize * 3) / 4;
3625                        hw->fc.low_water[i] = pbsize / 4;
3626                        /* Enable pfc for this TC */
3627                        tc = &dcb_config->tc_config[i];
3628                        tc->pfc = txgbe_dcb_pfc_enabled;
3629                }
3630                txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3631                if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
3632                        pfc_en &= 0x0F;
3633                ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
3634        }
3635
3636        return ret;
3637}
3638
3639void txgbe_configure_pb(struct rte_eth_dev *dev)
3640{
3641        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3642        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3643
3644        int hdrm;
3645        int tc = dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs;
3646
3647        /* Reserve 256 KB of the 512 KB Rx packet buffer for flow director */
3648        hdrm = 256; /*KB*/
3649
3650        hw->mac.setup_pba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
3651}
3652
3653void txgbe_configure_port(struct rte_eth_dev *dev)
3654{
3655        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3656        int i = 0;
3657        uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
3658                                0x9100, 0x9200,
3659                                0x0000, 0x0000,
3660                                0x0000, 0x0000};
3661
3662        PMD_INIT_FUNC_TRACE();
3663
3664        /* default outer vlan tpid */
3665        wr32(hw, TXGBE_EXTAG,
3666                TXGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
3667                TXGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
3668
3669        /* default inner vlan tpid */
3670        wr32m(hw, TXGBE_VLANCTL,
3671                TXGBE_VLANCTL_TPID_MASK,
3672                TXGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
3673        wr32m(hw, TXGBE_DMATXCTRL,
3674                TXGBE_DMATXCTRL_TPID_MASK,
3675                TXGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
3676
3677        /* default vlan tpid filters */
3678        for (i = 0; i < 8; i++) {
3679                wr32m(hw, TXGBE_TAGTPID(i / 2),
3680                        (i % 2 ? TXGBE_TAGTPID_MSB_MASK
3681                               : TXGBE_TAGTPID_LSB_MASK),
3682                        (i % 2 ? TXGBE_TAGTPID_MSB(tpids[i])
3683                               : TXGBE_TAGTPID_LSB(tpids[i])));
3684        }
3685
3686        /* default VXLAN port (IANA-assigned UDP port 4789) */
3687        wr32(hw, TXGBE_VXLANPORT, 4789);
3688}
3689
3690/**
3691 * txgbe_configure_dcb - Configure DCB hardware
3692 * @dev: pointer to rte_eth_dev
3693 */
3694void txgbe_configure_dcb(struct rte_eth_dev *dev)
3695{
3696        struct txgbe_dcb_config *dcb_cfg = TXGBE_DEV_DCB_CONFIG(dev);
3697        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3698
3699        PMD_INIT_FUNC_TRACE();
3700
3701        /* check support mq_mode for DCB */
3702        if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB &&
3703            dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB &&
3704            dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)
3705                return;
3706
3707        if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
3708                return;
3709
3710        /** Configure DCB hardware **/
3711        txgbe_dcb_hw_configure(dev, dcb_cfg);
3712}
3713
3714/*
3715 * VMDq is only supported on 10 GbE NICs.
3716 */
3717static void
3718txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3719{
3720        struct rte_eth_vmdq_rx_conf *cfg;
3721        struct txgbe_hw *hw;
3722        enum rte_eth_nb_pools num_pools;
3723        uint32_t mrqc, vt_ctl, vlanctrl;
3724        uint32_t vmolr = 0;
3725        int i;
3726
3727        PMD_INIT_FUNC_TRACE();
3728        hw = TXGBE_DEV_HW(dev);
3729        cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3730        num_pools = cfg->nb_queue_pools;
3731
3732        txgbe_rss_disable(dev);
3733
3734        /* enable vmdq */
3735        mrqc = TXGBE_PORTCTL_NUMVT_64;
3736        wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mrqc);
3737
3738        /* turn on virtualisation and set the default pool */
3739        vt_ctl = TXGBE_POOLCTL_RPLEN;
3740        if (cfg->enable_default_pool)
3741                vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
3742        else
3743                vt_ctl |= TXGBE_POOLCTL_DEFDSA;
3744
3745        wr32(hw, TXGBE_POOLCTL, vt_ctl);
3746
3747        for (i = 0; i < (int)num_pools; i++) {
3748                vmolr = txgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
3749                wr32(hw, TXGBE_POOLETHCTL(i), vmolr);
3750        }
3751
3752        /* enable vlan filtering and allow all vlan tags through */
3753        vlanctrl = rd32(hw, TXGBE_VLANCTL);
3754        vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
3755        wr32(hw, TXGBE_VLANCTL, vlanctrl);
3756
3757        /* enable all vlan filters */
3758        for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3759                wr32(hw, TXGBE_VLANTBL(i), UINT32_MAX);
3760
3761        /* enable receive for the pools (up to 64) */
3762        wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
3763        if (num_pools == ETH_64_POOLS)
3764                wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
3765
3766        /*
3767         * allow pools to read specific mac addresses
3768         * In this case, all pools should be able to read from mac addr 0
3769         */
3770        wr32(hw, TXGBE_ETHADDRIDX, 0);
3771        wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
3772        wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
3773
3774        /* set up filters for vlan tags as configured */
3775        for (i = 0; i < cfg->nb_pool_maps; i++) {
3776                /* set vlan id in VF register and set the valid bit */
3777                wr32(hw, TXGBE_PSRVLANIDX, i);
3778                wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
3779                                TXGBE_PSRVLAN_VID(cfg->pool_map[i].vlan_id)));
3780                /*
3781                 * Put the allowed pools in VFB reg. As we only have 16 or 64
3782                 * pools, we only need to use the first half of the register
3783                 * i.e. bits 0-31
3784                 */
3785                if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3786                        wr32(hw, TXGBE_PSRVLANPLM(0),
3787                                (cfg->pool_map[i].pools & UINT32_MAX));
3788                else
3789                        wr32(hw, TXGBE_PSRVLANPLM(1),
3790                                ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
3791        }
3792
3793        /* Tx general switch control: enable VMDq loopback if requested */
3794        if (cfg->enable_loop_back) {
3795                wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
3796                for (i = 0; i < 64; i++)
3797                        wr32m(hw, TXGBE_POOLETHCTL(i),
3798                                TXGBE_POOLETHCTL_LLB, TXGBE_POOLETHCTL_LLB);
3799        }
3800
3801        txgbe_flush(hw);
3802}
3803
3804/*
3805 * txgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
3806 * @hw: pointer to hardware structure
3807 */
3808static void
3809txgbe_vmdq_tx_hw_configure(struct txgbe_hw *hw)
3810{
3811        uint32_t reg;
3812        uint32_t q;
3813
3814        PMD_INIT_FUNC_TRACE();
3815        /*PF VF Transmit Enable*/
3816        wr32(hw, TXGBE_POOLTXENA(0), UINT32_MAX);
3817        wr32(hw, TXGBE_POOLTXENA(1), UINT32_MAX);
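        /* Two 32-bit enable registers cover all 64 transmit pools */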
3818
3819        /* Disable the Tx desc arbiter */
3820        reg = rd32(hw, TXGBE_ARBTXCTL);
3821        reg |= TXGBE_ARBTXCTL_DIA;
3822        wr32(hw, TXGBE_ARBTXCTL, reg);
3823
3824        wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK,
3825                TXGBE_PORTCTL_NUMVT_64);
3826
3827        /* Disable drop for all queues */
3828        for (q = 0; q < 128; q++) {
3829                u32 val = 1 << (q % 32);
3830                wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
3831        }
3832
3833        /* Enable the Tx desc arbiter */
3834        reg = rd32(hw, TXGBE_ARBTXCTL);
3835        reg &= ~TXGBE_ARBTXCTL_DIA;
3836        wr32(hw, TXGBE_ARBTXCTL, reg);
3837
3838        txgbe_flush(hw);
3839}
3840
3841static int __rte_cold
3842txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
3843{
3844        struct txgbe_rx_entry *rxe = rxq->sw_ring;
3845        uint64_t dma_addr;
3846        unsigned int i;
3847
3848        /* Initialize software ring entries */
3849        for (i = 0; i < rxq->nb_rx_desc; i++) {
3850                volatile struct txgbe_rx_desc *rxd;
3851                struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
3852
3853                if (mbuf == NULL) {
3854                        PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
3855                                     (unsigned int)rxq->queue_id);
3856                        return -ENOMEM;
3857                }
3858
3859                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3860                mbuf->port = rxq->port_id;
3861
3862                dma_addr =
3863                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
3864                rxd = &rxq->rx_ring[i];
3865                TXGBE_RXD_HDRADDR(rxd, 0);
3866                TXGBE_RXD_PKTADDR(rxd, dma_addr);
3867                rxe[i].mbuf = mbuf;
3868        }
3869
3870        return 0;
3871}
3872
3873static int
3874txgbe_config_vf_rss(struct rte_eth_dev *dev)
3875{
3876        struct txgbe_hw *hw;
3877        uint32_t mrqc;
3878
3879        txgbe_rss_configure(dev);
3880
3881        hw = TXGBE_DEV_HW(dev);
3882
3883        /* enable VF RSS */
3884        mrqc = rd32(hw, TXGBE_PORTCTL);
3885        mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
3886        switch (RTE_ETH_DEV_SRIOV(dev).active) {
3887        case ETH_64_POOLS:
3888                mrqc |= TXGBE_PORTCTL_NUMVT_64;
3889                break;
3890
3891        case ETH_32_POOLS:
3892                mrqc |= TXGBE_PORTCTL_NUMVT_32;
3893                break;
3894
3895        default:
3896                PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
3897                return -EINVAL;
3898        }
3899
3900        wr32(hw, TXGBE_PORTCTL, mrqc);
3901
3902        return 0;
3903}
3904
3905static int
3906txgbe_config_vf_default(struct rte_eth_dev *dev)
3907{
3908        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3909        uint32_t mrqc;
3910
3911        mrqc = rd32(hw, TXGBE_PORTCTL);
3912        mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
3913        switch (RTE_ETH_DEV_SRIOV(dev).active) {
3914        case ETH_64_POOLS:
3915                mrqc |= TXGBE_PORTCTL_NUMVT_64;
3916                break;
3917
3918        case ETH_32_POOLS:
3919                mrqc |= TXGBE_PORTCTL_NUMVT_32;
3920                break;
3921
3922        case ETH_16_POOLS:
3923                mrqc |= TXGBE_PORTCTL_NUMVT_16;
3924                break;
3925        default:
3926                PMD_INIT_LOG(ERR,
3927                        "invalid pool number in IOV mode");
3928                return 0;
3929        }
3930
3931        wr32(hw, TXGBE_PORTCTL, mrqc);
3932
3933        return 0;
3934}
3935
3936static int
3937txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3938{
3939        if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3940                /*
3941                 * SRIOV inactive scheme
3942                 * any DCB/RSS w/o VMDq multi-queue setting
3943                 */
3944                switch (dev->data->dev_conf.rxmode.mq_mode) {
3945                case ETH_MQ_RX_RSS:
3946                case ETH_MQ_RX_DCB_RSS:
3947                case ETH_MQ_RX_VMDQ_RSS:
3948                        txgbe_rss_configure(dev);
3949                        break;
3950
3951                case ETH_MQ_RX_VMDQ_DCB:
3952                        txgbe_vmdq_dcb_configure(dev);
3953                        break;
3954
3955                case ETH_MQ_RX_VMDQ_ONLY:
3956                        txgbe_vmdq_rx_hw_configure(dev);
3957                        break;
3958
3959                case ETH_MQ_RX_NONE:
3960                default:
3961                        /* if mq_mode is none, disable rss mode.*/
3962                        txgbe_rss_disable(dev);
3963                        break;
3964                }
3965        } else {
3966                /* SRIOV active scheme
3967                 * Support RSS together with SRIOV.
3968                 */
3969                switch (dev->data->dev_conf.rxmode.mq_mode) {
3970                case ETH_MQ_RX_RSS:
3971                case ETH_MQ_RX_VMDQ_RSS:
3972                        txgbe_config_vf_rss(dev);
3973                        break;
3974                case ETH_MQ_RX_VMDQ_DCB:
3975                case ETH_MQ_RX_DCB:
3976                /* In SRIOV, the configuration is the same as VMDq case */
3977                        txgbe_vmdq_dcb_configure(dev);
3978                        break;
3979                /* DCB/RSS together with SRIOV is not supported */
3980                case ETH_MQ_RX_VMDQ_DCB_RSS:
3981                case ETH_MQ_RX_DCB_RSS:
3982                        PMD_INIT_LOG(ERR,
3983                                "Could not support DCB/RSS with VMDq & SRIOV");
3984                        return -1;
3985                default:
3986                        txgbe_config_vf_default(dev);
3987                        break;
3988                }
3989        }
3990
3991        return 0;
3992}
3993
3994static int
3995txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
3996{
3997        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3998        uint32_t mtqc;
3999        uint32_t rttdcs;
4000
4001        /* disable arbiter */
4002        rttdcs = rd32(hw, TXGBE_ARBTXCTL);
4003        rttdcs |= TXGBE_ARBTXCTL_DIA;
4004        wr32(hw, TXGBE_ARBTXCTL, rttdcs);
4005
4006        if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4007                /*
4008                 * SRIOV inactive scheme
4009                 * any DCB w/o VMDq multi-queue setting
4010                 */
4011                if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4012                        txgbe_vmdq_tx_hw_configure(hw);
4013                else
4014                        wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
4015        } else {
4016                switch (RTE_ETH_DEV_SRIOV(dev).active) {
4017                /*
4018                 * SRIOV active scheme
4019                 * FIXME if support DCB together with VMDq & SRIOV
4020                 */
4021                case ETH_64_POOLS:
4022                        mtqc = TXGBE_PORTCTL_NUMVT_64;
4023                        break;
4024                case ETH_32_POOLS:
4025                        mtqc = TXGBE_PORTCTL_NUMVT_32;
4026                        break;
4027                case ETH_16_POOLS:
4028                        mtqc = TXGBE_PORTCTL_NUMVT_16;
4029                        break;
4030                default:
4031                        mtqc = 0;
4032                        PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4033                }
4034                wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mtqc);
4035        }
4036
4037        /* re-enable arbiter */
4038        rttdcs &= ~TXGBE_ARBTXCTL_DIA;
4039        wr32(hw, TXGBE_ARBTXCTL, rttdcs);
4040
4041        return 0;
4042}
4043
4044/**
4045 * txgbe_get_rscctl_maxdesc
4046 *
4047 * @pool: Memory pool of the Rx queue
4048 */
4049static inline uint32_t
4050txgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4051{
4052        struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4053
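        /*
         * Number of receive buffers needed to hold a maximum-size IPv4
         * packet; mapped below onto the nearest supported RSCMAX setting.
         */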
4054        uint16_t maxdesc =
4055                RTE_IPV4_MAX_PKT_LEN /
4056                        (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4057
4058        if (maxdesc >= 16)
4059                return TXGBE_RXCFG_RSCMAX_16;
4060        else if (maxdesc >= 8)
4061                return TXGBE_RXCFG_RSCMAX_8;
4062        else if (maxdesc >= 4)
4063                return TXGBE_RXCFG_RSCMAX_4;
4064        else
4065                return TXGBE_RXCFG_RSCMAX_1;
4066}
4067
4068/**
4069 * txgbe_set_rsc - configure RSC related port HW registers
4070 *
4071 * Configures the port's RSC related registers.
4072 *
4073 * @dev: port handle
4074 *
4075 * Returns 0 in case of success or a non-zero error code
4076 */
4077static int
4078txgbe_set_rsc(struct rte_eth_dev *dev)
4079{
4080        struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4081        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4082        struct rte_eth_dev_info dev_info = { 0 };
4083        bool rsc_capable = false;
4084        uint16_t i;
4085        uint32_t rdrxctl;
4086        uint32_t rfctl;
4087
4088        /* Sanity check */
4089        dev->dev_ops->dev_infos_get(dev, &dev_info);
4090        if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4091                rsc_capable = true;
4092
4093        if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4094                PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4095                                   "support it");
4096                return -EINVAL;
4097        }
4098
4099        /* RSC global configuration */
4100
4101        if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
4102             (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
4103                PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4104                                    "stripping is disabled");
4105                return -EINVAL;
4106        }
4107
4108        rfctl = rd32(hw, TXGBE_PSRCTL);
4109        if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4110                rfctl &= ~TXGBE_PSRCTL_RSCDIA;
4111        else
4112                rfctl |= TXGBE_PSRCTL_RSCDIA;
4113        wr32(hw, TXGBE_PSRCTL, rfctl);
4114
4115        /* If LRO hasn't been requested - we are done here. */
4116        if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4117                return 0;
4118
4119        /* Set PSRCTL.RSCACK bit */
4120        rdrxctl = rd32(hw, TXGBE_PSRCTL);
4121        rdrxctl |= TXGBE_PSRCTL_RSCACK;
4122        wr32(hw, TXGBE_PSRCTL, rdrxctl);
4123
4124        /* Per-queue RSC configuration */
4125        for (i = 0; i < dev->data->nb_rx_queues; i++) {
4126                struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
4127                uint32_t srrctl =
4128                        rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
4129                uint32_t psrtype =
4130                        rd32(hw, TXGBE_POOLRSS(rxq->reg_idx));
4131                uint32_t eitr =
4132                        rd32(hw, TXGBE_ITR(rxq->reg_idx));
4133
4134                /*
4135                 * txgbe PMD doesn't support header-split at the moment.
4136                 */
4137                srrctl &= ~TXGBE_RXCFG_HDRLEN_MASK;
4138                srrctl |= TXGBE_RXCFG_HDRLEN(128);
4139
4140                /*
4141                 * TODO: Consider setting the Receive Descriptor Minimum
4142                 * Threshold Size for an RSC case. This is not an obviously
4143                 * beneficial option, but one worth considering...
4144                 */
4145
4146                srrctl |= TXGBE_RXCFG_RSCENA;
4147                srrctl &= ~TXGBE_RXCFG_RSCMAX_MASK;
4148                srrctl |= txgbe_get_rscctl_maxdesc(rxq->mb_pool);
4149                psrtype |= TXGBE_POOLRSS_L4HDR;
4150
4151                /*
4152                 * RSC: Set ITR interval corresponding to 2K ints/s.
4153                 *
4154                 * Full-sized RSC aggregations for a 10Gb/s link will
4155                 * arrive at about 20K aggregation/s rate.
4156                 *
4157                 * 2K ints/s rate will make only 10% of the
4158                 * aggregations to be closed due to the interrupt timer
4159                 * expiration for a streaming at wire-speed case.
4160                 *
4161                 * For a sparse streaming case this setting will yield
4162                 * at most 500us latency for a single RSC aggregation.
4163                 */
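                    /*
                     * (At ~20K aggregations/s, a 2K ints/s timer can close at
                     * most 2K of them, i.e. roughly 10%.)
                     */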
4164                eitr &= ~TXGBE_ITR_IVAL_MASK;
4165                eitr |= TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4166                eitr |= TXGBE_ITR_WRDSA;
4167
4168                wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
4169                wr32(hw, TXGBE_POOLRSS(rxq->reg_idx), psrtype);
4170                wr32(hw, TXGBE_ITR(rxq->reg_idx), eitr);
4171
4172                /*
4173                 * RSC requires the mapping of the queue to the
4174                 * interrupt vector.
4175                 */
4176                txgbe_set_ivar_map(hw, 0, rxq->reg_idx, i);
4177        }
4178
4179        dev->data->lro = 1;
4180
4181        PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4182
4183        return 0;
4184}
4185
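    /*
     * Select the Rx burst callback for the port.  This is called from both
     * the PF (txgbe_dev_rx_init) and VF (txgbevf_dev_rx_init) init paths,
     * after the LRO, scattered Rx and bulk-allocation state is known.
     */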
4186void __rte_cold
4187txgbe_set_rx_function(struct rte_eth_dev *dev)
4188{
4189        uint16_t i;
4190        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4191
4192        /*
4193         * Initialize the appropriate LRO callback.
4194         *
4195         * If all queues satisfy the bulk allocation preconditions
4196         * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use
4197         * bulk allocation. Otherwise use a single allocation version.
4198         */
4199        if (dev->data->lro) {
4200                if (adapter->rx_bulk_alloc_allowed) {
4201                        PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4202                                           "allocation version");
4203                        dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
4204                } else {
4205                        PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4206                                           "allocation version");
4207                        dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
4208                }
4209        } else if (dev->data->scattered_rx) {
4210                /*
4211                 * Set the non-LRO scattered callback: there are bulk and
4212                 * single allocation versions.
4213                 */
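                    /*
                     * Note: the txgbe_recv_pkts_lro_* burst functions also
                     * handle plain scattered Rx, so they are reused here.
                     */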
4214                if (adapter->rx_bulk_alloc_allowed) {
4215                        PMD_INIT_LOG(DEBUG, "Using a Scattered Rx callback "
4216                                           "with bulk allocation (port=%d).",
4217                                     dev->data->port_id);
4218                        dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
4219                } else {
4220                        PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
4221                                            "single allocation) "
4222                                            "Scattered Rx callback "
4223                                            "(port=%d).",
4224                                     dev->data->port_id);
4225
4226                        dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
4227                }
4228        /*
4229         * Below we set "simple" callbacks according to port/queues parameters.
4230         * If the parameters allow it, we choose between the following
4231         * callbacks:
4232         *    - Bulk Allocation
4233         *    - Single buffer allocation (the simplest one)
4234         */
4235        } else if (adapter->rx_bulk_alloc_allowed) {
4236                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4237                                    "satisfied. Rx Burst Bulk Alloc function "
4238                                    "will be used on port=%d.",
4239                             dev->data->port_id);
4240
4241                dev->rx_pkt_burst = txgbe_recv_pkts_bulk_alloc;
4242        } else {
4243                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4244                                    "satisfied, or Scattered Rx is requested "
4245                                    "(port=%d).",
4246                             dev->data->port_id);
4247
4248                dev->rx_pkt_burst = txgbe_recv_pkts;
4249        }
4250
4251#ifdef RTE_LIB_SECURITY
4252        for (i = 0; i < dev->data->nb_rx_queues; i++) {
4253                struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
4254
4255                rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4256                                DEV_RX_OFFLOAD_SECURITY);
4257        }
4258#endif
4259}
4260
4261/*
4262 * Initializes Receive Unit.
4263 */
4264int __rte_cold
4265txgbe_dev_rx_init(struct rte_eth_dev *dev)
4266{
4267        struct txgbe_hw *hw;
4268        struct txgbe_rx_queue *rxq;
4269        uint64_t bus_addr;
4270        uint32_t fctrl;
4271        uint32_t hlreg0;
4272        uint32_t srrctl;
4273        uint32_t rdrxctl;
4274        uint32_t rxcsum;
4275        uint16_t buf_size;
4276        uint16_t i;
4277        struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4278        int rc;
4279
4280        PMD_INIT_FUNC_TRACE();
4281        hw = TXGBE_DEV_HW(dev);
4282
4283        /*
4284         * Make sure receives are disabled while setting
4285         * up the RX context (registers, descriptor rings, etc.).
4286         */
4287        wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
4288        wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
4289
4290        /* Enable receipt of broadcasted frames */
4291        fctrl = rd32(hw, TXGBE_PSRCTL);
4292        fctrl |= TXGBE_PSRCTL_BCA;
4293        wr32(hw, TXGBE_PSRCTL, fctrl);
4294
4295        /*
4296         * Configure CRC stripping, if any.
4297         */
4298        hlreg0 = rd32(hw, TXGBE_SECRXCTL);
4299        if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4300                hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
4301        else
4302                hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
4303        wr32(hw, TXGBE_SECRXCTL, hlreg0);
4304
4305        /*
4306         * Configure jumbo frame support, if any.
4307         */
4308        if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
4309                wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
4310                        TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
4311        } else {
4312                wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
4313                        TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
4314        }
4315
4316        /*
4317         * If loopback mode is configured, set LPBK bit.
4318         */
4319        hlreg0 = rd32(hw, TXGBE_PSRCTL);
4320        if (hw->mac.type == txgbe_mac_raptor &&
4321            dev->data->dev_conf.lpbk_mode)
4322                hlreg0 |= TXGBE_PSRCTL_LBENA;
4323        else
4324                hlreg0 &= ~TXGBE_PSRCTL_LBENA;
4325
4326        wr32(hw, TXGBE_PSRCTL, hlreg0);
4327
4328        /*
4329         * Assume no header split and no VLAN strip support
4330         * on any Rx queue first.
4331         */
4332        rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4333
4334        /* Setup RX queues */
4335        for (i = 0; i < dev->data->nb_rx_queues; i++) {
4336                rxq = dev->data->rx_queues[i];
4337
4338                /*
4339                 * Reset crc_len in case it was changed after queue setup by a
4340                 * call to configure.
4341                 */
4342                if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4343                        rxq->crc_len = RTE_ETHER_CRC_LEN;
4344                else
4345                        rxq->crc_len = 0;
4346
4347                /* Setup the Base and Length of the Rx Descriptor Rings */
4348                bus_addr = rxq->rx_ring_phys_addr;
4349                wr32(hw, TXGBE_RXBAL(rxq->reg_idx),
4350                                (uint32_t)(bus_addr & BIT_MASK32));
4351                wr32(hw, TXGBE_RXBAH(rxq->reg_idx),
4352                                (uint32_t)(bus_addr >> 32));
4353                wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
4354                wr32(hw, TXGBE_RXWP(rxq->reg_idx), 0);
4355
4356                srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
4357
4358                /* Set if packets are dropped when no descriptors available */
4359                if (rxq->drop_en)
4360                        srrctl |= TXGBE_RXCFG_DROP;
4361
4362                /*
4363                 * Configure the RX buffer size in the PKTLEN field of
4364                 * the RXCFG register of the queue.
4365                 * The value is in 1 KB resolution. Valid values can be from
4366                 * 1 KB to 16 KB.
4367                 */
4368                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4369                        RTE_PKTMBUF_HEADROOM);
4370                buf_size = ROUND_UP(buf_size, 0x1 << 10);
4371                srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
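                    /*
                     * For example, a pool created with RTE_MBUF_DEFAULT_BUF_SIZE
                     * has a 2176B data room; minus the 128B default headroom this
                     * gives 2048B, which rounds up to a 2KB hardware RX buffer.
                     */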
4372
4373                wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
4374
4375                /* Account for dual VLAN tags when checking the buffer size */
4376                if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4377                                            2 * TXGBE_VLAN_TAG_SIZE > buf_size)
4378                        dev->data->scattered_rx = 1;
4379                if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4380                        rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
4381        }
4382
4383        if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
4384                dev->data->scattered_rx = 1;
4385
4386        /*
4387         * Device configured with multiple RX queues.
4388         */
4389        txgbe_dev_mq_rx_configure(dev);
4390
4391        /*
4392         * Setup the Checksum Register.
4393         * Disable Full-Packet Checksum which is mutually exclusive with RSS.
4394         * Enable IP/L4 checksum computation by hardware if requested to do so.
4395         */
4396        rxcsum = rd32(hw, TXGBE_PSRCTL);
4397        rxcsum |= TXGBE_PSRCTL_PCSD;
4398        if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
4399                rxcsum |= TXGBE_PSRCTL_L4CSUM;
4400        else
4401                rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
4402
4403        wr32(hw, TXGBE_PSRCTL, rxcsum);
4404
4405        if (hw->mac.type == txgbe_mac_raptor) {
4406                rdrxctl = rd32(hw, TXGBE_SECRXCTL);
4407                if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4408                        rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
4409                else
4410                        rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
4411                wr32(hw, TXGBE_SECRXCTL, rdrxctl);
4412        }
4413
4414        rc = txgbe_set_rsc(dev);
4415        if (rc)
4416                return rc;
4417
4418        txgbe_set_rx_function(dev);
4419
4420        return 0;
4421}
4422
4423/*
4424 * Initializes Transmit Unit.
4425 */
4426void __rte_cold
4427txgbe_dev_tx_init(struct rte_eth_dev *dev)
4428{
4429        struct txgbe_hw     *hw;
4430        struct txgbe_tx_queue *txq;
4431        uint64_t bus_addr;
4432        uint16_t i;
4433
4434        PMD_INIT_FUNC_TRACE();
4435        hw = TXGBE_DEV_HW(dev);
4436
4437        /* Setup the Base and Length of the Tx Descriptor Rings */
4438        for (i = 0; i < dev->data->nb_tx_queues; i++) {
4439                txq = dev->data->tx_queues[i];
4440
4441                bus_addr = txq->tx_ring_phys_addr;
4442                wr32(hw, TXGBE_TXBAL(txq->reg_idx),
4443                                (uint32_t)(bus_addr & BIT_MASK32));
4444                wr32(hw, TXGBE_TXBAH(txq->reg_idx),
4445                                (uint32_t)(bus_addr >> 32));
4446                wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_BUFLEN_MASK,
4447                        TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
4448                /* Setup the HW Tx Head and TX Tail descriptor pointers */
4449                wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
4450                wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
4451        }
4452
4453        /* Device configured with multiple TX queues. */
4454        txgbe_dev_mq_tx_configure(dev);
4455}
4456
4457/*
4458 * Set up link loopback mode Tx->Rx.
4459 */
4460static inline void __rte_cold
4461txgbe_setup_loopback_link_raptor(struct txgbe_hw *hw)
4462{
4463        PMD_INIT_FUNC_TRACE();
4464
4465        wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_LB, TXGBE_MACRXCFG_LB);
4466
4467        msec_delay(50);
4468}
4469
4470/*
4471 * Start Transmit and Receive Units.
4472 */
4473int __rte_cold
4474txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
4475{
4476        struct txgbe_hw     *hw;
4477        struct txgbe_tx_queue *txq;
4478        struct txgbe_rx_queue *rxq;
4479        uint32_t dmatxctl;
4480        uint32_t rxctrl;
4481        uint16_t i;
4482        int ret = 0;
4483
4484        PMD_INIT_FUNC_TRACE();
4485        hw = TXGBE_DEV_HW(dev);
4486
4487        for (i = 0; i < dev->data->nb_tx_queues; i++) {
4488                txq = dev->data->tx_queues[i];
4489                /* Setup Transmit Threshold Registers */
4490                wr32m(hw, TXGBE_TXCFG(txq->reg_idx),
4491                      TXGBE_TXCFG_HTHRESH_MASK |
4492                      TXGBE_TXCFG_WTHRESH_MASK,
4493                      TXGBE_TXCFG_HTHRESH(txq->hthresh) |
4494                      TXGBE_TXCFG_WTHRESH(txq->wthresh));
4495        }
4496
4497        dmatxctl = rd32(hw, TXGBE_DMATXCTRL);
4498        dmatxctl |= TXGBE_DMATXCTRL_ENA;
4499        wr32(hw, TXGBE_DMATXCTRL, dmatxctl);
4500
4501        for (i = 0; i < dev->data->nb_tx_queues; i++) {
4502                txq = dev->data->tx_queues[i];
4503                if (!txq->tx_deferred_start) {
4504                        ret = txgbe_dev_tx_queue_start(dev, i);
4505                        if (ret < 0)
4506                                return ret;
4507                }
4508        }
4509
4510        for (i = 0; i < dev->data->nb_rx_queues; i++) {
4511                rxq = dev->data->rx_queues[i];
4512                if (!rxq->rx_deferred_start) {
4513                        ret = txgbe_dev_rx_queue_start(dev, i);
4514                        if (ret < 0)
4515                                return ret;
4516                }
4517        }
4518
4519        /* Enable Receive engine */
4520        rxctrl = rd32(hw, TXGBE_PBRXCTL);
4521        rxctrl |= TXGBE_PBRXCTL_ENA;
4522        hw->mac.enable_rx_dma(hw, rxctrl);
4523
4524        /* If loopback mode is enabled, set up the link accordingly */
4525        if (hw->mac.type == txgbe_mac_raptor &&
4526            dev->data->dev_conf.lpbk_mode)
4527                txgbe_setup_loopback_link_raptor(hw);
4528
4529#ifdef RTE_LIB_SECURITY
4530        if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
4531            (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
4532                ret = txgbe_crypto_enable_ipsec(dev);
4533                if (ret != 0) {
4534                        PMD_DRV_LOG(ERR,
4535                                    "txgbe_crypto_enable_ipsec fails with %d.",
4536                                    ret);
4537                        return ret;
4538                }
4539        }
4540#endif
4541
4542        return 0;
4543}
4544
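    /*
     * Save/restore helpers used around queue stop: each queue owns eight
     * u32 slots in hw->q_rx_regs/q_tx_regs for its base address and config
     * registers, and the restore path masks off the enable bit so the queue
     * comes back disabled until it is explicitly started again.
     */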
4545void
4546txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
4547{
4548        u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
4549        *(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
4550        *(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
4551        *(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
4552}
4553
4554void
4555txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
4556{
4557        u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
4558        wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
4559        wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
4560        wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
4561}
4562
4563void
4564txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
4565{
4566        u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
4567        *(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
4568        *(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
4569        *(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
4570}
4571
4572void
4573txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
4574{
4575        u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
4576        wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
4577        wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
4578        wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
4579}
4580
4581/*
4582 * Start Receive Units for specified queue.
4583 */
4584int __rte_cold
4585txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4586{
4587        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4588        struct txgbe_rx_queue *rxq;
4589        uint32_t rxdctl;
4590        int poll_ms;
4591
4592        PMD_INIT_FUNC_TRACE();
4593
4594        rxq = dev->data->rx_queues[rx_queue_id];
4595
4596        /* Allocate buffers for descriptor rings */
4597        if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
4598                PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
4599                             rx_queue_id);
4600                return -1;
4601        }
4602        rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
4603        rxdctl |= TXGBE_RXCFG_ENA;
4604        wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
4605
4606        /* Wait until RX Enable ready */
4607        poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
4608        do {
4609                rte_delay_ms(1);
4610                rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
4611        } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
4612        if (!poll_ms)
4613                PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
4614        rte_wmb();
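            /*
             * Publish the ring: with the head (RXRP) at 0 and the tail (RXWP)
             * at nb_rx_desc - 1, all but one descriptor is owned by hardware.
             */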
4615        wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
4616        wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
4617        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4618
4619        return 0;
4620}
4621
4622/*
4623 * Stop Receive Units for specified queue.
4624 */
4625int __rte_cold
4626txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4627{
4628        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4629        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4630        struct txgbe_rx_queue *rxq;
4631        uint32_t rxdctl;
4632        int poll_ms;
4633
4634        PMD_INIT_FUNC_TRACE();
4635
4636        rxq = dev->data->rx_queues[rx_queue_id];
4637
4638        txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
4639        wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
4640
4641        /* Wait until RX Enable bit clear */
4642        poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
4643        do {
4644                rte_delay_ms(1);
4645                rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
4646        } while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
4647        if (!poll_ms)
4648                PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
4649
4650        rte_delay_us(RTE_TXGBE_WAIT_100_US);
4651        txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
4652
4653        txgbe_rx_queue_release_mbufs(rxq);
4654        txgbe_reset_rx_queue(adapter, rxq);
4655        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4656
4657        return 0;
4658}
4659
4660/*
4661 * Start Transmit Units for specified queue.
4662 */
4663int __rte_cold
4664txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4665{
4666        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4667        struct txgbe_tx_queue *txq;
4668        uint32_t txdctl;
4669        int poll_ms;
4670
4671        PMD_INIT_FUNC_TRACE();
4672
4673        txq = dev->data->tx_queues[tx_queue_id];
4674        wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
4675
4676        /* Wait until TX Enable ready */
4677        poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
4678        do {
4679                rte_delay_ms(1);
4680                txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
4681        } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
4682        if (!poll_ms)
4683                PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
4684                             tx_queue_id);
4685
4686        rte_wmb();
4687        wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
4688        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4689
4690        return 0;
4691}
4692
4693/*
4694 * Stop Transmit Units for specified queue.
4695 */
4696int __rte_cold
4697txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4698{
4699        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4700        struct txgbe_tx_queue *txq;
4701        uint32_t txdctl;
4702        uint32_t txtdh, txtdt;
4703        int poll_ms;
4704
4705        PMD_INIT_FUNC_TRACE();
4706
4707        txq = dev->data->tx_queues[tx_queue_id];
4708
4709        /* Wait until TX queue is empty */
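            /*
             * The queue is drained once the head pointer (TXRP) catches up
             * with the tail (TXWP) last written by software.
             */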
4710        poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
4711        do {
4712                rte_delay_us(RTE_TXGBE_WAIT_100_US);
4713                txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
4714                txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
4715        } while (--poll_ms && (txtdh != txtdt));
4716        if (!poll_ms)
4717                PMD_INIT_LOG(ERR,
4718                        "Tx Queue %d is not empty when stopping.",
4719                        tx_queue_id);
4720
4721        txgbe_dev_save_tx_queue(hw, txq->reg_idx);
4722        wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
4723
4724        /* Wait until TX Enable bit clear */
4725        poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
4726        do {
4727                rte_delay_ms(1);
4728                txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
4729        } while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
4730        if (!poll_ms)
4731                PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
4732                        tx_queue_id);
4733
4734        rte_delay_us(RTE_TXGBE_WAIT_100_US);
4735        txgbe_dev_store_tx_queue(hw, txq->reg_idx);
4736
4737        if (txq->ops != NULL) {
4738                txq->ops->release_mbufs(txq);
4739                txq->ops->reset(txq);
4740        }
4741        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4742
4743        return 0;
4744}
4745
4746void
4747txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4748        struct rte_eth_rxq_info *qinfo)
4749{
4750        struct txgbe_rx_queue *rxq;
4751
4752        rxq = dev->data->rx_queues[queue_id];
4753
4754        qinfo->mp = rxq->mb_pool;
4755        qinfo->scattered_rx = dev->data->scattered_rx;
4756        qinfo->nb_desc = rxq->nb_rx_desc;
4757
4758        qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4759        qinfo->conf.rx_drop_en = rxq->drop_en;
4760        qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4761        qinfo->conf.offloads = rxq->offloads;
4762}
4763
4764void
4765txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4766        struct rte_eth_txq_info *qinfo)
4767{
4768        struct txgbe_tx_queue *txq;
4769
4770        txq = dev->data->tx_queues[queue_id];
4771
4772        qinfo->nb_desc = txq->nb_tx_desc;
4773
4774        qinfo->conf.tx_thresh.pthresh = txq->pthresh;
4775        qinfo->conf.tx_thresh.hthresh = txq->hthresh;
4776        qinfo->conf.tx_thresh.wthresh = txq->wthresh;
4777
4778        qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4779        qinfo->conf.offloads = txq->offloads;
4780        qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4781}
4782
4783/*
4784 * [VF] Initializes Receive Unit.
4785 */
4786int __rte_cold
4787txgbevf_dev_rx_init(struct rte_eth_dev *dev)
4788{
4789        struct txgbe_hw     *hw;
4790        struct txgbe_rx_queue *rxq;
4791        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4792        uint64_t bus_addr;
4793        uint32_t srrctl, psrtype;
4794        uint16_t buf_size;
4795        uint16_t i;
4796        int ret;
4797
4798        PMD_INIT_FUNC_TRACE();
4799        hw = TXGBE_DEV_HW(dev);
4800
4801        if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
4802                PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
4803                        "it should be a power of 2");
4804                return -1;
4805        }
4806
4807        if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
4808                PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
4809                        "it should be less than or equal to %d",
4810                        hw->mac.max_rx_queues);
4811                return -1;
4812        }
4813
4814        /*
4815         * When the VF driver issues a TXGBE_VF_RESET request, the PF driver
4816         * disables VF packet receipt if the PF MTU is > 1500.
4817         * This is done to deal with the limitation that the PF and
4818         * all VFs must share the same MTU.
4819         * The PF driver re-enables VF packet receipt only when the VF
4820         * driver issues a TXGBE_VF_SET_LPE request.
4821         * In the meantime, the VF device cannot be used, even if the VF driver
4822         * and the Guest VM network stack are ready to accept packets with a
4823         * size up to the PF MTU.
4824         * As a workaround to this PF behaviour, always call
4825         * txgbevf_rlpml_set_vf, even if jumbo frames are not used. This way,
4826         * VF packet reception works in all cases.
4827         */
4828        if (txgbevf_rlpml_set_vf(hw,
4829            (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
4830                PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
4831                             dev->data->dev_conf.rxmode.max_rx_pkt_len);
4832                return -EINVAL;
4833        }
4834
4835        /*
4836         * Assume no header split and no VLAN strip support
4837         * on any Rx queue first.
4838         */
4839        rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
4840
4841        /* Set PSR type for VF RSS according to max Rx queue */
4842        psrtype = TXGBE_VFPLCFG_PSRL4HDR |
4844                  TXGBE_VFPLCFG_PSRL2HDR |
4845                  TXGBE_VFPLCFG_PSRTUNHDR |
4846                  TXGBE_VFPLCFG_PSRTUNMAC;
4847        wr32(hw, TXGBE_VFPLCFG, TXGBE_VFPLCFG_PSR(psrtype));
4848
4849        /* Setup RX queues */
4850        for (i = 0; i < dev->data->nb_rx_queues; i++) {
4851                rxq = dev->data->rx_queues[i];
4852
4853                /* Allocate buffers for descriptor rings */
4854                ret = txgbe_alloc_rx_queue_mbufs(rxq);
4855                if (ret)
4856                        return ret;
4857
4858                /* Setup the Base and Length of the Rx Descriptor Rings */
4859                bus_addr = rxq->rx_ring_phys_addr;
4860
4861                wr32(hw, TXGBE_RXBAL(i),
4862                                (uint32_t)(bus_addr & BIT_MASK32));
4863                wr32(hw, TXGBE_RXBAH(i),
4864                                (uint32_t)(bus_addr >> 32));
4865                wr32(hw, TXGBE_RXRP(i), 0);
4866                wr32(hw, TXGBE_RXWP(i), 0);
4867
4868                /* Configure the RXCFG register */
4869                srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
4870
4871                /* Set if packets are dropped when no descriptors available */
4872                if (rxq->drop_en)
4873                        srrctl |= TXGBE_RXCFG_DROP;
4874
4875                /*
4876                 * Configure the RX buffer size in the PKTLEN field of
4877                 * the RXCFG register of the queue.
4878                 * The value is in 1 KB resolution. Valid values can be from
4879                 * 1 KB to 16 KB.
4880                 */
4881                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4882                        RTE_PKTMBUF_HEADROOM);
4883                buf_size = ROUND_UP(buf_size, 1 << 10);
4884                srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
4885
4886                /*
4887                 * Write the VF's per-queue RXCFG register.
4888                 */
4889                wr32(hw, TXGBE_RXCFG(i), srrctl);
4890
4891                if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
4892                    /* It adds dual VLAN length for supporting dual VLAN */
4893                    (rxmode->max_rx_pkt_len +
4894                                2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
4895                        if (!dev->data->scattered_rx)
4896                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
4897                        dev->data->scattered_rx = 1;
4898                }
4899
4900                if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4901                        rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
4902        }
4903
4904        /*
4905         * Device configured with multiple RX queues.
4906         */
4907        txgbe_dev_mq_rx_configure(dev);
4908
4909        txgbe_set_rx_function(dev);
4910
4911        return 0;
4912}
4913
4914/*
4915 * [VF] Initializes Transmit Unit.
4916 */
4917void __rte_cold
4918txgbevf_dev_tx_init(struct rte_eth_dev *dev)
4919{
4920        struct txgbe_hw     *hw;
4921        struct txgbe_tx_queue *txq;
4922        uint64_t bus_addr;
4923        uint16_t i;
4924
4925        PMD_INIT_FUNC_TRACE();
4926        hw = TXGBE_DEV_HW(dev);
4927
4928        /* Setup the Base and Length of the Tx Descriptor Rings */
4929        for (i = 0; i < dev->data->nb_tx_queues; i++) {
4930                txq = dev->data->tx_queues[i];
4931                bus_addr = txq->tx_ring_phys_addr;
4932                wr32(hw, TXGBE_TXBAL(i),
4933                                (uint32_t)(bus_addr & BIT_MASK32));
4934                wr32(hw, TXGBE_TXBAH(i),
4935                                (uint32_t)(bus_addr >> 32));
4936                wr32m(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_BUFLEN_MASK,
4937                        TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
4938                /* Setup the HW Tx Head and TX Tail descriptor pointers */
4939                wr32(hw, TXGBE_TXRP(i), 0);
4940                wr32(hw, TXGBE_TXWP(i), 0);
4941        }
4942}
4943
4944/*
4945 * [VF] Start Transmit and Receive Units.
4946 */
4947void __rte_cold
4948txgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
4949{
4950        struct txgbe_hw     *hw;
4951        struct txgbe_tx_queue *txq;
4952        struct txgbe_rx_queue *rxq;
4953        uint32_t txdctl;
4954        uint32_t rxdctl;
4955        uint16_t i;
4956        int poll_ms;
4957
4958        PMD_INIT_FUNC_TRACE();
4959        hw = TXGBE_DEV_HW(dev);
4960
4961        for (i = 0; i < dev->data->nb_tx_queues; i++) {
4962                txq = dev->data->tx_queues[i];
4963                /* Setup Transmit Threshold Registers */
4964                wr32m(hw, TXGBE_TXCFG(txq->reg_idx),
4965                      TXGBE_TXCFG_HTHRESH_MASK |
4966                      TXGBE_TXCFG_WTHRESH_MASK,
4967                      TXGBE_TXCFG_HTHRESH(txq->hthresh) |
4968                      TXGBE_TXCFG_WTHRESH(txq->wthresh));
4969        }
4970
4971        for (i = 0; i < dev->data->nb_tx_queues; i++) {
4972                wr32m(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
4973
4974                poll_ms = 10;
4975                /* Wait until TX Enable ready */
4976                do {
4977                        rte_delay_ms(1);
4978                        txdctl = rd32(hw, TXGBE_TXCFG(i));
4979                } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
4980                if (!poll_ms)
4981                        PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
4982        }
4983        for (i = 0; i < dev->data->nb_rx_queues; i++) {
4984                rxq = dev->data->rx_queues[i];
4985
4986                wr32m(hw, TXGBE_RXCFG(i), TXGBE_RXCFG_ENA, TXGBE_RXCFG_ENA);
4987
4988                /* Wait until RX Enable ready */
4989                poll_ms = 10;
4990                do {
4991                        rte_delay_ms(1);
4992                        rxdctl = rd32(hw, TXGBE_RXCFG(i));
4993                } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
4994                if (!poll_ms)
4995                        PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
4996                rte_wmb();
4997                wr32(hw, TXGBE_RXWP(i), rxq->nb_rx_desc - 1);
4998        }
4999}
5000
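    /*
     * Deep-copy an rte_flow RSS action into driver-owned storage so the hash
     * key and queue list remain valid after the caller's buffers (typically
     * the flow-create arguments) go out of scope.
     */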
5001int
5002txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
5003                    const struct rte_flow_action_rss *in)
5004{
5005        if (in->key_len > RTE_DIM(out->key) ||
5006            in->queue_num > RTE_DIM(out->queue))
5007                return -EINVAL;
5008        out->conf = (struct rte_flow_action_rss){
5009                .func = in->func,
5010                .level = in->level,
5011                .types = in->types,
5012                .key_len = in->key_len,
5013                .queue_num = in->queue_num,
5014                .key = memcpy(out->key, in->key, in->key_len),
5015                .queue = memcpy(out->queue, in->queue,
5016                                sizeof(*in->queue) * in->queue_num),
5017        };
5018        return 0;
5019}
5020
5021int
5022txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5023                      const struct rte_flow_action_rss *with)
5024{
5025        return (comp->func == with->func &&
5026                comp->level == with->level &&
5027                comp->types == with->types &&
5028                comp->key_len == with->key_len &&
5029                comp->queue_num == with->queue_num &&
5030                !memcmp(comp->key, with->key, with->key_len) &&
5031                !memcmp(comp->queue, with->queue,
5032                        sizeof(*with->queue) * with->queue_num));
5033}
5034
5035int
5036txgbe_config_rss_filter(struct rte_eth_dev *dev,
5037                struct txgbe_rte_flow_rss_conf *conf, bool add)
5038{
5039        struct txgbe_hw *hw;
5040        uint32_t reta;
5041        uint16_t i;
5042        uint16_t j;
5043        struct rte_eth_rss_conf rss_conf = {
5044                .rss_key = conf->conf.key_len ?
5045                        (void *)(uintptr_t)conf->conf.key : NULL,
5046                .rss_key_len = conf->conf.key_len,
5047                .rss_hf = conf->conf.types,
5048        };
5049        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5050
5051        PMD_INIT_FUNC_TRACE();
5052        hw = TXGBE_DEV_HW(dev);
5053
5054        if (!add) {
5055                if (txgbe_action_rss_same(&filter_info->rss_info.conf,
5056                                          &conf->conf)) {
5057                        txgbe_rss_disable(dev);
5058                        memset(&filter_info->rss_info, 0,
5059                                sizeof(struct txgbe_rte_flow_rss_conf));
5060                        return 0;
5061                }
5062                return -EINVAL;
5063        }
5064
5065        if (filter_info->rss_info.conf.queue_num)
5066                return -EINVAL;
5067        /* Fill in redirection table
5068         * The byte-swap is needed because NIC registers are in
5069         * little-endian order.
5070         */
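            /*
             * Each 32-bit RSSTBL register packs four one-byte RETA entries:
             * one queue index is shifted in per iteration and the register is
             * written once every fourth entry (when (i & 3) == 3).
             */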
5071        reta = 0;
5072        for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
5073                if (j == conf->conf.queue_num)
5074                        j = 0;
5075                reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
5076                if ((i & 3) == 3)
5077                        wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
5078        }
5079
5080        /* Configure the RSS key and the RSS protocols used to compute
5081         * the RSS hash of input packets.
5082         */
5083        if ((rss_conf.rss_hf & TXGBE_RSS_OFFLOAD_ALL) == 0) {
5084                txgbe_rss_disable(dev);
5085                return 0;
5086        }
5087        if (rss_conf.rss_key == NULL)
5088                rss_conf.rss_key = rss_intel_key; /* Default hash key */
5089        txgbe_dev_rss_hash_update(dev, &rss_conf);
5090
5091        if (txgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
5092                return -EINVAL;
5093
5094        return 0;
5095}
5096
5097