dpdk/drivers/net/bnxt/bnxt_rxr.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2014-2021 Broadcom
   3 * All rights reserved.
   4 */
   5
   6#include <inttypes.h>
   7#include <stdbool.h>
   8
   9#include <rte_bitmap.h>
  10#include <rte_byteorder.h>
  11#include <rte_malloc.h>
  12#include <rte_memory.h>
  13#include <rte_alarm.h>
  14
  15#include "bnxt.h"
  16#include "bnxt_reps.h"
  17#include "bnxt_ring.h"
  18#include "bnxt_rxr.h"
  19#include "bnxt_rxq.h"
  20#include "hsi_struct_def_dpdk.h"
  21#include "bnxt_hwrm.h"
  22
  23#include <bnxt_tf_common.h>
  24#include <ulp_mark_mgr.h>
  25
  26/*
  27 * RX Ring handling
  28 */
  29
  30static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
  31{
  32        struct rte_mbuf *data;
  33
  34        data = rte_mbuf_raw_alloc(mb);
  35
  36        return data;
  37}
  38
  39static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
  40                                     struct bnxt_rx_ring_info *rxr,
  41                                     uint16_t raw_prod)
  42{
  43        uint16_t prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
  44        struct rx_prod_pkt_bd *rxbd;
  45        struct rte_mbuf **rx_buf;
  46        struct rte_mbuf *mbuf;
  47
  48        rxbd = &rxr->rx_desc_ring[prod];
  49        rx_buf = &rxr->rx_buf_ring[prod];
  50        mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
  51        if (!mbuf) {
  52                __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
  53                return -ENOMEM;
  54        }
  55
  56        *rx_buf = mbuf;
  57        mbuf->data_off = RTE_PKTMBUF_HEADROOM;
  58
  59        rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
  60
  61        return 0;
  62}
  63
  64static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
  65                                     struct bnxt_rx_ring_info *rxr,
  66                                     uint16_t raw_prod)
  67{
  68        uint16_t prod = RING_IDX(rxr->ag_ring_struct, raw_prod);
  69        struct rx_prod_pkt_bd *rxbd;
  70        struct rte_mbuf **rx_buf;
  71        struct rte_mbuf *mbuf;
  72
  73        rxbd = &rxr->ag_desc_ring[prod];
  74        rx_buf = &rxr->ag_buf_ring[prod];
  75        if (rxbd == NULL) {
  76                PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
  77                return -EINVAL;
  78        }
  79
  80        if (rx_buf == NULL) {
  81                PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
  82                return -EINVAL;
  83        }
  84
  85        mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
  86        if (!mbuf) {
  87                __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
  88                return -ENOMEM;
  89        }
  90
  91        *rx_buf = mbuf;
  92        mbuf->data_off = RTE_PKTMBUF_HEADROOM;
  93
  94        rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
  95
  96        return 0;
  97}
  98
  99static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
 100                               struct rte_mbuf *mbuf)
 101{
 102        uint16_t prod, raw_prod = RING_NEXT(rxr->rx_raw_prod);
 103        struct rte_mbuf **prod_rx_buf;
 104        struct rx_prod_pkt_bd *prod_bd;
 105
 106        prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
 107        prod_rx_buf = &rxr->rx_buf_ring[prod];
 108
 109        RTE_ASSERT(*prod_rx_buf == NULL);
 110        RTE_ASSERT(mbuf != NULL);
 111
 112        *prod_rx_buf = mbuf;
 113
 114        prod_bd = &rxr->rx_desc_ring[prod];
 115
 116        prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 117
 118        rxr->rx_raw_prod = raw_prod;
 119}
 120
 121static inline
 122struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
 123                                     uint16_t cons)
 124{
 125        struct rte_mbuf **cons_rx_buf;
 126        struct rte_mbuf *mbuf;
 127
 128        cons_rx_buf = &rxr->rx_buf_ring[RING_IDX(rxr->rx_ring_struct, cons)];
 129        RTE_ASSERT(*cons_rx_buf != NULL);
 130        mbuf = *cons_rx_buf;
 131        *cons_rx_buf = NULL;
 132
 133        return mbuf;
 134}
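
/*
 * Illustrative sketch, not part of the driver: the rx/ag/cp rings keep a
 * free-running "raw" producer/consumer counter and mask it down to a slot
 * index, relying on the ring size being a power of two (ring_mask ==
 * ring_size - 1, see bnxt_init_rx_ring_struct()).  The helper below mimics
 * what RING_IDX()/RING_NEXT() do; the name and the example values in the
 * comment are assumptions made for illustration only.
 */
static __rte_unused uint16_t
bnxt_example_ring_idx(uint16_t raw, uint16_t ring_size)
{
        uint16_t ring_mask = ring_size - 1; /* ring_size must be a power of two */

        /* e.g. raw == 9 on an 8-entry ring wraps to slot 1 */
        return raw & ring_mask;
}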
 135
 136static void bnxt_rx_ring_reset(void *arg)
 137{
 138        struct bnxt *bp = arg;
 139        int i, rc = 0;
 140        struct bnxt_rx_queue *rxq;
 141
 142
 143        for (i = 0; i < (int)bp->rx_nr_rings; i++) {
 144                struct bnxt_rx_ring_info *rxr;
 145
 146                rxq = bp->rx_queues[i];
 147                if (!rxq || !rxq->in_reset)
 148                        continue;
 149
 150                rxr = rxq->rx_ring;
 151                /* Disable and flush TPA before resetting the RX ring */
 152                if (rxr->tpa_info)
 153                        bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, false);
 154                rc = bnxt_hwrm_rx_ring_reset(bp, i);
 155                if (rc) {
 156                        PMD_DRV_LOG(ERR, "Rx ring%d reset failed\n", i);
 157                        continue;
 158                }
 159
 160                bnxt_rx_queue_release_mbufs(rxq);
 161                rxr->rx_raw_prod = 0;
 162                rxr->ag_raw_prod = 0;
 163                rxr->rx_next_cons = 0;
 164                bnxt_init_one_rx_ring(rxq);
 165                bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
 166                bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
 167                if (rxr->tpa_info)
 168                        bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, true);
 169
 170                rxq->in_reset = 0;
 171        }
 172}
 173
 174
 175static void bnxt_sched_ring_reset(struct bnxt_rx_queue *rxq)
 176{
 177        rxq->in_reset = 1;
 178        rte_eal_alarm_set(1, bnxt_rx_ring_reset, (void *)rxq->bp);
 179}
 180
 181static void bnxt_tpa_get_metadata(struct bnxt *bp,
 182                                  struct bnxt_tpa_info *tpa_info,
 183                                  struct rx_tpa_start_cmpl *tpa_start,
 184                                  struct rx_tpa_start_cmpl_hi *tpa_start1)
 185{
 186        tpa_info->cfa_code_valid = 0;
 187        tpa_info->vlan_valid = 0;
 188        tpa_info->hash_valid = 0;
 189        tpa_info->l4_csum_valid = 0;
 190
 191        if (likely(tpa_start->flags_type &
 192                   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
 193                tpa_info->hash_valid = 1;
 194                tpa_info->rss_hash = rte_le_to_cpu_32(tpa_start->rss_hash);
 195        }
 196
 197        if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
 198                struct rx_tpa_start_v2_cmpl *v2_tpa_start = (void *)tpa_start;
 199                struct rx_tpa_start_v2_cmpl_hi *v2_tpa_start1 =
 200                        (void *)tpa_start1;
 201
 202                if (v2_tpa_start->agg_id &
 203                    RX_TPA_START_V2_CMPL_METADATA1_VALID) {
 204                        tpa_info->vlan_valid = 1;
 205                        tpa_info->vlan =
 206                                rte_le_to_cpu_16(v2_tpa_start1->metadata0);
 207                }
 208
 209                if (v2_tpa_start1->flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
 210                        tpa_info->l4_csum_valid = 1;
 211
 212                return;
 213        }
 214
 215        tpa_info->cfa_code_valid = 1;
 216        tpa_info->cfa_code = rte_le_to_cpu_16(tpa_start1->cfa_code);
 217        if (tpa_start1->flags2 &
 218            rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
 219                tpa_info->vlan_valid = 1;
 220                tpa_info->vlan = rte_le_to_cpu_32(tpa_start1->metadata);
 221        }
 222
 223        if (likely(tpa_start1->flags2 &
 224                   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
 225                tpa_info->l4_csum_valid = 1;
 226}
 227
 228static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
 229                           struct rx_tpa_start_cmpl *tpa_start,
 230                           struct rx_tpa_start_cmpl_hi *tpa_start1)
 231{
 232        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 233        uint16_t agg_id;
 234        uint16_t data_cons;
 235        struct bnxt_tpa_info *tpa_info;
 236        struct rte_mbuf *mbuf;
 237
 238        agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);
 239
 240        data_cons = tpa_start->opaque;
 241        tpa_info = &rxr->tpa_info[agg_id];
 242        if (unlikely(data_cons != rxr->rx_next_cons)) {
 243                PMD_DRV_LOG(ERR, "TPA cons %x, expected cons %x\n",
 244                            data_cons, rxr->rx_next_cons);
 245                bnxt_sched_ring_reset(rxq);
 246                return;
 247        }
 248
 249        mbuf = bnxt_consume_rx_buf(rxr, data_cons);
 250
 251        bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
 252
 253        tpa_info->agg_count = 0;
 254        tpa_info->mbuf = mbuf;
 255        tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
 256
 257        mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 258        mbuf->nb_segs = 1;
 259        mbuf->next = NULL;
 260        mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
 261        mbuf->data_len = mbuf->pkt_len;
 262        mbuf->port = rxq->port_id;
 263        mbuf->ol_flags = RTE_MBUF_F_RX_LRO;
 264
 265        bnxt_tpa_get_metadata(rxq->bp, tpa_info, tpa_start, tpa_start1);
 266
 267        if (likely(tpa_info->hash_valid)) {
 268                mbuf->hash.rss = tpa_info->rss_hash;
 269                mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 270        } else if (tpa_info->cfa_code_valid) {
 271                mbuf->hash.fdir.id = tpa_info->cfa_code;
 272                mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
 273        }
 274
 275        if (tpa_info->vlan_valid && BNXT_RX_VLAN_STRIP_EN(rxq->bp)) {
 276                mbuf->vlan_tci = tpa_info->vlan;
 277                mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
 278        }
 279
 280        if (likely(tpa_info->l4_csum_valid))
 281                mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
 282
 283        /* recycle next mbuf */
 284        data_cons = RING_NEXT(data_cons);
 285        bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
 286
 287        rxr->rx_next_cons = RING_IDX(rxr->rx_ring_struct,
 288                                     RING_NEXT(data_cons));
 289}
 290
 291static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
 292                uint8_t agg_bufs, uint32_t raw_cp_cons)
 293{
 294        uint16_t last_cp_cons;
 295        struct rx_pkt_cmpl *agg_cmpl;
 296
 297        raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
 298        last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
 299        agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
 300        return bnxt_cpr_cmp_valid(agg_cmpl, raw_cp_cons,
 301                                  cpr->cp_ring_struct->ring_size);
 302}
 303
  304/* TPA consumes agg buffers out of order; replenish only freed slots. */
 305static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
 306{
 307        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 308        uint16_t raw_next = RING_NEXT(rxr->ag_raw_prod);
 309        uint16_t bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
 310
 311        /* TODO batch allocation for better performance */
 312        while (rte_bitmap_get(rxr->ag_bitmap, bmap_next)) {
 313                if (unlikely(bnxt_alloc_ag_data(rxq, rxr, raw_next))) {
 314                        PMD_DRV_LOG(ERR, "agg mbuf alloc failed: prod=0x%x\n",
 315                                    raw_next);
 316                        break;
 317                }
 318                rte_bitmap_clear(rxr->ag_bitmap, bmap_next);
 319                rxr->ag_raw_prod = raw_next;
 320                raw_next = RING_NEXT(raw_next);
 321                bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
 322        }
 323
 324        return 0;
 325}
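
/*
 * Illustrative sketch, not part of the driver: minimal use of the
 * rte_bitmap API as applied by bnxt_prod_ag_mbuf() above -- a set bit
 * means "slot freed, needs a new buffer" and is cleared once the slot has
 * been refilled.  The slot count and names below are assumptions for the
 * example only.
 */
static __rte_unused int
bnxt_example_ag_bitmap(void)
{
        uint32_t n_slots = 256; /* hypothetical aggregation ring size */
        uint32_t bmp_size = rte_bitmap_get_memory_footprint(n_slots);
        uint8_t *mem = rte_zmalloc("example_bmp", bmp_size, RTE_CACHE_LINE_SIZE);
        struct rte_bitmap *bmp;
        uint32_t slot = 7;

        if (mem == NULL)
                return -ENOMEM;

        bmp = rte_bitmap_init(n_slots, mem, bmp_size);
        if (bmp == NULL) {
                rte_free(mem);
                return -EINVAL;
        }

        rte_bitmap_set(bmp, slot);           /* slot consumed out of order */
        if (rte_bitmap_get(bmp, slot))       /* replenish pass finds it... */
                rte_bitmap_clear(bmp, slot); /* ...and clears it once refilled */

        rte_free(mem);
        return 0;
}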
 326
 327static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
 328                         struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
 329                         uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
 330{
 331        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 332        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 333        int i;
 334        uint16_t cp_cons, ag_cons;
 335        struct rx_pkt_cmpl *rxcmp;
 336        struct rte_mbuf *last = mbuf;
 337        bool is_p5_tpa = tpa_info && BNXT_CHIP_P5(rxq->bp);
 338
 339        for (i = 0; i < agg_buf; i++) {
 340                struct rte_mbuf **ag_buf;
 341                struct rte_mbuf *ag_mbuf;
 342
 343                if (is_p5_tpa) {
 344                        rxcmp = (void *)&tpa_info->agg_arr[i];
 345                } else {
 346                        *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
 347                        cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
 348                        rxcmp = (struct rx_pkt_cmpl *)
 349                                        &cpr->cp_desc_ring[cp_cons];
 350                }
 351
 352#ifdef BNXT_DEBUG
 353                bnxt_dump_cmpl(cp_cons, rxcmp);
 354#endif
 355
 356                ag_cons = rxcmp->opaque;
 357                RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
 358                ag_buf = &rxr->ag_buf_ring[ag_cons];
 359                ag_mbuf = *ag_buf;
 360                RTE_ASSERT(ag_mbuf != NULL);
 361
 362                ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
 363
 364                mbuf->nb_segs++;
 365                mbuf->pkt_len += ag_mbuf->data_len;
 366
 367                last->next = ag_mbuf;
 368                last = ag_mbuf;
 369
 370                *ag_buf = NULL;
 371
  372                /*
  373                 * Aggregation buffers are consumed out of order by the TPA
  374                 * module, so use a bitmap to track freed slots that must be
  375                 * reallocated and posted to the NIC.
  376                 */
 377                rte_bitmap_set(rxr->ag_bitmap, ag_cons);
 378        }
 379        last->next = NULL;
 380        bnxt_prod_ag_mbuf(rxq);
 381        return 0;
 382}
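
/*
 * Illustrative sketch, not part of the driver: the segment-chaining pattern
 * used by bnxt_rx_pages() above.  Each aggregation buffer is appended to
 * the head mbuf: nb_segs and pkt_len grow on the head, data_len stays
 * per-segment.  The helper name is an assumption for illustration only.
 */
static __rte_unused void
bnxt_example_chain_seg(struct rte_mbuf *head, struct rte_mbuf **last,
                       struct rte_mbuf *seg, uint16_t seg_len)
{
        seg->data_len = seg_len;        /* length of this segment only */
        seg->next = NULL;

        head->nb_segs++;                /* head tracks the segment count */
        head->pkt_len += seg_len;       /* head tracks the total packet length */

        (*last)->next = seg;            /* link the new segment at the tail */
        *last = seg;
}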
 383
 384static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 385                           uint32_t *raw_cons, void *cmp)
 386{
 387        struct rx_pkt_cmpl *rxcmp = cmp;
 388        uint32_t tmp_raw_cons = *raw_cons;
 389        uint8_t cmp_type, agg_bufs = 0;
 390
 391        cmp_type = CMP_TYPE(rxcmp);
 392
 393        if (cmp_type == CMPL_BASE_TYPE_RX_L2) {
 394                agg_bufs = BNXT_RX_L2_AGG_BUFS(rxcmp);
 395        } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
 396                struct rx_tpa_end_cmpl *tpa_end = cmp;
 397
 398                if (BNXT_CHIP_P5(bp))
 399                        return 0;
 400
 401                agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
 402        }
 403
 404        if (agg_bufs) {
 405                if (!bnxt_agg_bufs_valid(cpr, agg_bufs, tmp_raw_cons))
 406                        return -EBUSY;
 407        }
 408        *raw_cons = tmp_raw_cons;
 409        return 0;
 410}
 411
 412static inline struct rte_mbuf *bnxt_tpa_end(
 413                struct bnxt_rx_queue *rxq,
 414                uint32_t *raw_cp_cons,
 415                struct rx_tpa_end_cmpl *tpa_end,
 416                struct rx_tpa_end_cmpl_hi *tpa_end1)
 417{
 418        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 419        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 420        uint16_t agg_id;
 421        struct rte_mbuf *mbuf;
 422        uint8_t agg_bufs;
 423        uint8_t payload_offset;
 424        struct bnxt_tpa_info *tpa_info;
 425
 426        if (unlikely(rxq->in_reset)) {
 427                PMD_DRV_LOG(ERR, "rxq->in_reset: raw_cp_cons:%d\n",
 428                            *raw_cp_cons);
 429                bnxt_discard_rx(rxq->bp, cpr, raw_cp_cons, tpa_end);
 430                return NULL;
 431        }
 432
 433        if (BNXT_CHIP_P5(rxq->bp)) {
 434                struct rx_tpa_v2_end_cmpl *th_tpa_end;
 435                struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
 436
 437                th_tpa_end = (void *)tpa_end;
 438                th_tpa_end1 = (void *)tpa_end1;
 439                agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
 440                agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
 441                payload_offset = th_tpa_end1->payload_offset;
 442        } else {
 443                agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
 444                agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
 445                if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
 446                        return NULL;
 447                payload_offset = tpa_end->payload_offset;
 448        }
 449
 450        tpa_info = &rxr->tpa_info[agg_id];
 451        mbuf = tpa_info->mbuf;
 452        RTE_ASSERT(mbuf != NULL);
 453
 454        if (agg_bufs) {
 455                bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);
 456        }
 457        mbuf->l4_len = payload_offset;
 458
 459        struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
 460        RTE_ASSERT(new_data != NULL);
 461        if (!new_data) {
 462                __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
 463                return NULL;
 464        }
 465        tpa_info->mbuf = new_data;
 466
 467        return mbuf;
 468}
 469
 470uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM] __rte_cache_aligned;
 471
 472static void __rte_cold
 473bnxt_init_ptype_table(void)
 474{
 475        uint32_t *pt = bnxt_ptype_table;
 476        static bool initialized;
 477        int ip6, tun, type;
 478        uint32_t l3;
 479        int i;
 480
 481        if (initialized)
 482                return;
 483
 484        for (i = 0; i < BNXT_PTYPE_TBL_DIM; i++) {
 485                if (i & BNXT_PTYPE_TBL_VLAN_MSK)
 486                        pt[i] = RTE_PTYPE_L2_ETHER_VLAN;
 487                else
 488                        pt[i] = RTE_PTYPE_L2_ETHER;
 489
 490                ip6 = !!(i & BNXT_PTYPE_TBL_IP_VER_MSK);
 491                tun = !!(i & BNXT_PTYPE_TBL_TUN_MSK);
 492                type = (i & BNXT_PTYPE_TBL_TYPE_MSK) >> BNXT_PTYPE_TBL_TYPE_SFT;
 493
 494                if (!tun && !ip6)
 495                        l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
 496                else if (!tun && ip6)
 497                        l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
 498                else if (tun && !ip6)
 499                        l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
 500                else
 501                        l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
 502
 503                switch (type) {
 504                case BNXT_PTYPE_TBL_TYPE_ICMP:
 505                        if (tun)
 506                                pt[i] |= l3 | RTE_PTYPE_INNER_L4_ICMP;
 507                        else
 508                                pt[i] |= l3 | RTE_PTYPE_L4_ICMP;
 509                        break;
 510                case BNXT_PTYPE_TBL_TYPE_TCP:
 511                        if (tun)
 512                                pt[i] |= l3 | RTE_PTYPE_INNER_L4_TCP;
 513                        else
 514                                pt[i] |= l3 | RTE_PTYPE_L4_TCP;
 515                        break;
 516                case BNXT_PTYPE_TBL_TYPE_UDP:
 517                        if (tun)
 518                                pt[i] |= l3 | RTE_PTYPE_INNER_L4_UDP;
 519                        else
 520                                pt[i] |= l3 | RTE_PTYPE_L4_UDP;
 521                        break;
 522                case BNXT_PTYPE_TBL_TYPE_IP:
 523                        pt[i] |= l3;
 524                        break;
 525                }
 526        }
 527        initialized = true;
 528}
 529
 530static uint32_t
 531bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
 532{
 533        uint32_t flags_type, flags2;
 534        uint8_t index;
 535
 536        flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
 537        flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
 538
 539        /* Validate ptype table indexing at build time. */
 540        bnxt_check_ptype_constants();
 541
 542        /*
 543         * Index format:
 544         *     bit 0: Set if IP tunnel encapsulated packet.
 545         *     bit 1: Set if IPv6 packet, clear if IPv4.
 546         *     bit 2: Set if VLAN tag present.
 547         *     bits 3-6: Four-bit hardware packet type field.
 548         */
 549        index = BNXT_CMPL_ITYPE_TO_IDX(flags_type) |
 550                BNXT_CMPL_VLAN_TUN_TO_IDX(flags2) |
 551                BNXT_CMPL_IP_VER_TO_IDX(flags2);
 552
 553        return bnxt_ptype_table[index];
 554}
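
/*
 * Illustrative sketch, not part of the driver: building the bit-packed
 * ptype table index described in the comment inside bnxt_parse_pkt_type()
 * (bit 0: tunnel, bit 1: IPv6, bit 2: VLAN, bits 3-6: hardware itype).
 * The driver derives these bits from the completion record with the
 * BNXT_CMPL_*_TO_IDX() macros; the explicit shifts and the helper name
 * below are assumptions for illustration only.
 */
static __rte_unused uint8_t
bnxt_example_ptype_idx(bool tunnel, bool ipv6, bool vlan, uint8_t hw_itype)
{
        return (uint8_t)((tunnel ? 1 : 0) |
                         ((ipv6 ? 1 : 0) << 1) |
                         ((vlan ? 1 : 0) << 2) |
                         ((hw_itype & 0xf) << 3));
}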
 555
 556static void __rte_cold
 557bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
 558{
 559        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 560        struct rte_eth_conf *dev_conf;
 561        bool outer_cksum_enabled;
 562        uint64_t offloads;
 563        uint32_t *pt;
 564        int i;
 565
 566        dev_conf = &rxq->bp->eth_dev->data->dev_conf;
 567        offloads = dev_conf->rxmode.offloads;
 568
 569        outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
 570                                             RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
 571
 572        /* Initialize ol_flags table. */
 573        pt = rxr->ol_flags_table;
 574        for (i = 0; i < BNXT_OL_FLAGS_TBL_DIM; i++) {
 575                pt[i] = 0;
 576
 577                if (BNXT_RX_VLAN_STRIP_EN(rxq->bp)) {
 578                        if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
 579                                pt[i] |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
 580                }
 581
 582                if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 3)) {
 583                        /* Tunnel case. */
 584                        if (outer_cksum_enabled) {
 585                                if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
 586                                        pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 587
 588                                if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
 589                                        pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
 590
 591                                if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
 592                                        pt[i] |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
 593                        } else {
 594                                if (i & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
 595                                        pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 596
 597                                if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
 598                                        pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
 599                        }
 600                } else {
 601                        /* Non-tunnel case. */
 602                        if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
 603                                pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 604
 605                        if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
 606                                pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
 607                }
 608        }
 609
 610        /* Initialize checksum error table. */
 611        pt = rxr->ol_flags_err_table;
 612        for (i = 0; i < BNXT_OL_FLAGS_ERR_TBL_DIM; i++) {
 613                pt[i] = 0;
 614
 615                if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 2)) {
 616                        /* Tunnel case. */
 617                        if (outer_cksum_enabled) {
 618                                if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
 619                                        pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
 620
 621                                if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
 622                                        pt[i] |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
 623
 624                                if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
 625                                        pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
 626
 627                                if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
 628                                        pt[i] |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
 629                        } else {
 630                                if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
 631                                        pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
 632
 633                                if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
 634                                        pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
 635                        }
 636                } else {
 637                        /* Non-tunnel case. */
 638                        if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
 639                                pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
 640
 641                        if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
 642                                pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
 643                }
 644        }
 645}
 646
 647static void
 648bnxt_set_ol_flags(struct bnxt_rx_ring_info *rxr, struct rx_pkt_cmpl *rxcmp,
 649                  struct rx_pkt_cmpl_hi *rxcmp1, struct rte_mbuf *mbuf)
 650{
 651        uint16_t flags_type, errors, flags;
 652        uint64_t ol_flags;
 653
 654        flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
 655
 656        flags = rte_le_to_cpu_32(rxcmp1->flags2) &
 657                                (RX_PKT_CMPL_FLAGS2_IP_CS_CALC |
 658                                 RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
 659                                 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC |
 660                                 RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC |
 661                                 RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN);
 662
 663        flags |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 3;
 664        errors = rte_le_to_cpu_16(rxcmp1->errors_v2) &
 665                                (RX_PKT_CMPL_ERRORS_IP_CS_ERROR |
 666                                 RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
 667                                 RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR |
 668                                 RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR);
 669        errors = (errors >> 4) & flags;
 670
 671        ol_flags = rxr->ol_flags_table[flags & ~errors];
 672
 673        if (unlikely(errors)) {
 674                errors |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 2;
 675                ol_flags |= rxr->ol_flags_err_table[errors];
 676        }
 677
 678        if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
 679                mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash);
 680                ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 681        }
 682
 683#ifdef RTE_LIBRTE_IEEE1588
 684        if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
 685                     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
 686                ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST;
 687#endif
 688
 689        mbuf->ol_flags = ol_flags;
 690}
 691
 692#ifdef RTE_LIBRTE_IEEE1588
 693static void
 694bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
 695{
 696        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 697        uint64_t last_hwrm_time;
 698        uint64_t pkt_time = 0;
 699
 700        if (!BNXT_CHIP_P5(bp) || !ptp)
 701                return;
 702
  703        /* On Thor, Rx timestamps are provided to the driver directly in
  704         * the Rx completion records, but only the lower 32 bits of the
  705         * timestamp are present in the completion. The driver reads the
  706         * current 48-bit free-running timer using the HWRM_PORT_TS_QUERY
  707         * command and combines the upper 16 bits from the HWRM response
  708         * with the lower 32 bits from the Rx completion to produce the
  709         * 48-bit timestamp for the Rx packet.
  710         */
 711        last_hwrm_time = ptp->current_time;
 712        pkt_time = (last_hwrm_time & BNXT_PTP_CURRENT_TIME_MASK) | rx_ts_cmpl;
 713        if (rx_ts_cmpl < (uint32_t)last_hwrm_time) {
 714                /* timer has rolled over */
 715                pkt_time += (1ULL << 32);
 716        }
 717        ptp->rx_timestamp = pkt_time;
 718}
 719#endif
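
/*
 * Illustrative sketch, not part of the driver: the 48-bit timestamp
 * reconstruction described in bnxt_get_rx_ts_p5() above.  The upper bits
 * of a recent free-running timer reading are combined with the 32-bit
 * value from the Rx completion, and 2^32 is added if the lower 32 bits
 * rolled over since that reading.  The mask value below is an assumed
 * stand-in for BNXT_PTP_CURRENT_TIME_MASK.
 */
static __rte_unused uint64_t
bnxt_example_rx_ts(uint64_t last_timer, uint32_t ts_lo32)
{
        const uint64_t upper_mask = 0xffff00000000ULL; /* assumed: upper 16 of 48 bits */
        uint64_t ts = (last_timer & upper_mask) | ts_lo32;

        /* lower 32 bits rolled over since last_timer was sampled */
        if (ts_lo32 < (uint32_t)last_timer)
                ts += (1ULL << 32);

        return ts;
}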
 720
 721static uint32_t
 722bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
 723                          struct rte_mbuf *mbuf, uint32_t *vfr_flag)
 724{
 725        uint32_t cfa_code;
 726        uint32_t meta_fmt;
 727        uint32_t meta;
 728        bool gfid = false;
 729        uint32_t mark_id;
 730        uint32_t flags2;
 731        uint32_t gfid_support = 0;
 732        int rc;
 733
 734        if (BNXT_GFID_ENABLED(bp))
 735                gfid_support = 1;
 736
 737        cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
 738        flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
 739        meta = rte_le_to_cpu_32(rxcmp1->metadata);
 740
  741        /*
  742         * Bits [6:4] of the flags2 field carry extra information that
  743         * indicates whether the flow is in the TCAM, EM, or EEM table.
  744         */
 745        meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
 746                BNXT_CFA_META_FMT_SHFT;
 747
 748        switch (meta_fmt) {
 749        case 0:
 750                if (gfid_support) {
 751                        /* Not an LFID or GFID, a flush cmd. */
 752                        goto skip_mark;
 753                } else {
 754                        /* LFID mode, no vlan scenario */
 755                        gfid = false;
 756                }
 757                break;
 758        case 4:
 759        case 5:
 760                /*
 761                 * EM/TCAM case
 762                 * Assume that EM doesn't support Mark due to GFID
 763                 * collisions with EEM.  Simply return without setting the mark
 764                 * in the mbuf.
 765                 */
 766                if (BNXT_CFA_META_EM_TEST(meta)) {
  767                        /* This is an EM hit: {EM(1), GFID[27:16], 19'd0 or vtag} */
 768                        gfid = true;
 769                        meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
 770                        cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
 771                } else {
 772                        /*
 773                         * It is a TCAM entry, so it is an LFID.
 774                         * The TCAM IDX and Mode can also be determined
 775                         * by decoding the meta_data. We are not
 776                         * using these for now.
 777                         */
 778                }
 779                break;
 780        case 6:
 781        case 7:
 782                /* EEM Case, only using gfid in EEM for now. */
 783                gfid = true;
 784
  785                /*
  786                 * For EEM flows, the first part of cfa_code is 16 bits.
  787                 * The second part is embedded in the metadata field from
  788                 * bit 19 onwards: the driver ignores the first 19 bits of
  789                 * metadata and uses the next 12 bits as the upper 12 bits
  790                 * of cfa_code.
  791                 */
 792                meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
 793                cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
 794                break;
 795        default:
 796                /* For other values, the cfa_code is assumed to be an LFID. */
 797                break;
 798        }
 799
 800        rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
 801                                  cfa_code, vfr_flag, &mark_id);
 802        if (!rc) {
  803                /* VF to VFR Rx path, so skip mark_id injection in the mbuf. */
 804                if (vfr_flag && *vfr_flag)
 805                        return mark_id;
 806                /* Got the mark, write it to the mbuf and return */
 807                mbuf->hash.fdir.hi = mark_id;
 808                *bnxt_cfa_code_dynfield(mbuf) = cfa_code & 0xffffffffull;
 809                mbuf->hash.fdir.id = rxcmp1->cfa_code;
 810                mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
 811                return mark_id;
 812        }
 813
 814skip_mark:
 815        mbuf->hash.fdir.hi = 0;
 816        mbuf->hash.fdir.id = 0;
 817
 818        return 0;
 819}
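
/*
 * Illustrative sketch, not part of the driver: the EEM cfa_code
 * reconstruction described in bnxt_ulp_set_mark_in_mbuf() above.  The low
 * 16 bits come from the completion's cfa_code field; the next 12 bits are
 * carried in the metadata field starting at bit 19.  The shift and mask
 * values below are assumptions standing in for BNXT_RX_META_CFA_CODE_SHIFT
 * and BNXT_CFA_CODE_META_SHIFT.
 */
static __rte_unused uint32_t
bnxt_example_eem_cfa_code(uint16_t cmpl_cfa_code, uint32_t metadata)
{
        const uint32_t meta_shift = 19; /* assumed: skip the low 19 metadata bits */
        const uint32_t code_shift = 16; /* assumed: place above the 16-bit code */

        return (uint32_t)cmpl_cfa_code |
               (((metadata >> meta_shift) & 0xfff) << code_shift);
}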
 820
 821void bnxt_set_mark_in_mbuf(struct bnxt *bp,
 822                           struct rx_pkt_cmpl_hi *rxcmp1,
 823                           struct rte_mbuf *mbuf)
 824{
 825        uint32_t cfa_code = 0;
 826
 827        if (unlikely(bp->mark_table == NULL))
 828                return;
 829
 830        cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
 831        if (!cfa_code)
 832                return;
 833
 834        if (cfa_code && !bp->mark_table[cfa_code].valid)
 835                return;
 836
 837        mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
 838        mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
 839}
 840
 841static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 842                       struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
 843{
 844        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 845        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 846        struct rx_pkt_cmpl *rxcmp;
 847        struct rx_pkt_cmpl_hi *rxcmp1;
 848        uint32_t tmp_raw_cons = *raw_cons;
 849        uint16_t cons, raw_prod, cp_cons =
 850            RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
 851        struct rte_mbuf *mbuf;
 852        int rc = 0;
 853        uint8_t agg_buf = 0;
 854        uint16_t cmp_type;
 855        uint32_t vfr_flag = 0, mark_id = 0;
 856        struct bnxt *bp = rxq->bp;
 857
 858        rxcmp = (struct rx_pkt_cmpl *)
 859            &cpr->cp_desc_ring[cp_cons];
 860
 861        cmp_type = CMP_TYPE(rxcmp);
 862
 863        if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) {
 864                struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp;
 865                uint16_t agg_id = rte_cpu_to_le_16(rx_agg->agg_id);
 866                struct bnxt_tpa_info *tpa_info;
 867
 868                tpa_info = &rxr->tpa_info[agg_id];
 869                RTE_ASSERT(tpa_info->agg_count < 16);
 870                tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
 871                rc = -EINVAL; /* Continue w/o new mbuf */
 872                goto next_rx;
 873        }
 874
 875        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
 876        cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
 877        rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
 878
 879        if (!bnxt_cpr_cmp_valid(rxcmp1, tmp_raw_cons,
 880                                cpr->cp_ring_struct->ring_size))
 881                return -EBUSY;
 882
 883        if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START ||
 884            cmp_type == RX_TPA_START_V2_CMPL_TYPE_RX_TPA_START_V2) {
 885                bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
 886                               (struct rx_tpa_start_cmpl_hi *)rxcmp1);
 887                rc = -EINVAL; /* Continue w/o new mbuf */
 888                goto next_rx;
 889        } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
 890                mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
 891                                   (struct rx_tpa_end_cmpl *)rxcmp,
 892                                   (struct rx_tpa_end_cmpl_hi *)rxcmp1);
 893                if (unlikely(!mbuf))
 894                        return -EBUSY;
 895                *rx_pkt = mbuf;
 896                goto next_rx;
 897        } else if ((cmp_type != CMPL_BASE_TYPE_RX_L2) &&
 898                   (cmp_type != CMPL_BASE_TYPE_RX_L2_V2)) {
 899                rc = -EINVAL;
 900                goto next_rx;
 901        }
 902
 903        agg_buf = BNXT_RX_L2_AGG_BUFS(rxcmp);
 904        if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
 905                return -EBUSY;
 906
 907        raw_prod = rxr->rx_raw_prod;
 908
 909        cons = rxcmp->opaque;
 910        if (unlikely(cons != rxr->rx_next_cons)) {
 911                bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
 912                PMD_DRV_LOG(ERR, "RX cons %x != expected cons %x\n",
 913                            cons, rxr->rx_next_cons);
 914                bnxt_sched_ring_reset(rxq);
 915                rc = -EBUSY;
 916                goto next_rx;
 917        }
 918        mbuf = bnxt_consume_rx_buf(rxr, cons);
 919        if (mbuf == NULL)
 920                return -EBUSY;
 921
 922        mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 923        mbuf->nb_segs = 1;
 924        mbuf->next = NULL;
 925        mbuf->pkt_len = rxcmp->len;
 926        mbuf->data_len = mbuf->pkt_len;
 927        mbuf->port = rxq->port_id;
 928
 929#ifdef RTE_LIBRTE_IEEE1588
 930        if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
 931                      RX_PKT_CMPL_FLAGS_MASK) ==
 932                     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
 933                bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);
 934#endif
 935
 936        if (cmp_type == CMPL_BASE_TYPE_RX_L2_V2) {
 937                bnxt_parse_csum_v2(mbuf, rxcmp1);
 938                bnxt_parse_pkt_type_v2(mbuf, rxcmp, rxcmp1);
 939                bnxt_rx_vlan_v2(mbuf, rxcmp, rxcmp1);
 940                /* TODO Add support for cfa_code parsing */
 941                goto reuse_rx_mbuf;
 942        }
 943
 944        bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf);
 945
 946        mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
 947
 948        bnxt_set_vlan(rxcmp1, mbuf);
 949
 950        if (BNXT_TRUFLOW_EN(bp))
 951                mark_id = bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf,
 952                                                    &vfr_flag);
 953        else
 954                bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
 955
 956reuse_rx_mbuf:
 957        if (agg_buf)
 958                bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);
 959
 960#ifdef BNXT_DEBUG
 961        if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
 962                /* Re-install the mbuf back to the rx ring */
 963                bnxt_reuse_rx_mbuf(rxr, cons, mbuf);
 964
 965                rc = -EIO;
 966                goto next_rx;
 967        }
 968#endif
 969        /*
 970         * TODO: Redesign this....
 971         * If the allocation fails, the packet does not get received.
 972         * Simply returning this will result in slowly falling behind
 973         * on the producer ring buffers.
 974         * Instead, "filling up" the producer just before ringing the
 975         * doorbell could be a better solution since it will let the
 976         * producer ring starve until memory is available again pushing
 977         * the drops into hardware and getting them out of the driver
 978         * allowing recovery to a full producer ring.
 979         *
 980         * This could also help with cache usage by preventing per-packet
 981         * calls in favour of a tight loop with the same function being called
 982         * in it.
 983         */
 984        raw_prod = RING_NEXT(raw_prod);
 985        if (bnxt_alloc_rx_data(rxq, rxr, raw_prod)) {
 986                PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n",
 987                            raw_prod);
 988                rc = -ENOMEM;
 989                goto rx;
 990        }
 991        rxr->rx_raw_prod = raw_prod;
 992rx:
 993        rxr->rx_next_cons = RING_IDX(rxr->rx_ring_struct, RING_NEXT(cons));
 994
 995        if (BNXT_TRUFLOW_EN(bp) && (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) &&
 996            vfr_flag) {
 997                bnxt_vfr_recv(mark_id, rxq->queue_id, mbuf);
  998                /* Return an error so that nb_rx_pkts is not
  999                 * incremented.
 1000                 * This packet was meant for the representor, so there
 1001                 * is no need to account it to the parent Rx burst
 1002                 * function.
 1003                 */
1004                rc = -ENODEV;
1005                goto next_rx;
1006        }
 1007        /*
 1008         * All mbufs are allocated with the same size under DPDK,
 1009         * so there is no rx_copy_thresh optimization.
 1010         */
1011        *rx_pkt = mbuf;
1012
1013next_rx:
1014
1015        *raw_cons = tmp_raw_cons;
1016
1017        return rc;
1018}
1019
1020uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1021                               uint16_t nb_pkts)
1022{
1023        struct bnxt_rx_queue *rxq = rx_queue;
1024        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1025        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1026        uint16_t rx_raw_prod = rxr->rx_raw_prod;
1027        uint16_t ag_raw_prod = rxr->ag_raw_prod;
1028        uint32_t raw_cons = cpr->cp_raw_cons;
1029        bool alloc_failed = false;
1030        uint32_t cons;
1031        int nb_rx_pkts = 0;
1032        int nb_rep_rx_pkts = 0;
1033        struct rx_pkt_cmpl *rxcmp;
1034        int rc = 0;
1035        bool evt = false;
1036
1037        if (unlikely(is_bnxt_in_error(rxq->bp)))
1038                return 0;
1039
1040        /* If Rx Q was stopped return */
1041        if (unlikely(!rxq->rx_started))
1042                return 0;
1043
1044#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
 1045        /*
 1046         * Replenish buffers if needed after a transition from vector to
 1047         * non-vector receive processing.
 1048         */
1049        while (unlikely(rxq->rxrearm_nb)) {
1050                if (!bnxt_alloc_rx_data(rxq, rxr, rxq->rxrearm_start)) {
1051                        rxr->rx_raw_prod = rxq->rxrearm_start;
1052                        bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
1053                        rxq->rxrearm_start++;
1054                        rxq->rxrearm_nb--;
1055                } else {
1056                        /* Retry allocation on next call. */
1057                        break;
1058                }
1059        }
1060#endif
1061
1062        /* Handle RX burst request */
1063        while (1) {
1064                cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
1065                rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1066
1067                if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons,
1068                                        cpr->cp_ring_struct->ring_size))
1069                        break;
1070                if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE) {
1071                        PMD_DRV_LOG(ERR, "Rx flush done\n");
1072                } else if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
1073                     (CMP_TYPE(rxcmp) <= RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG)) {
1074                        rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
1075                        if (!rc)
1076                                nb_rx_pkts++;
1077                        else if (rc == -EBUSY)  /* partial completion */
1078                                break;
1079                        else if (rc == -ENODEV) /* completion for representor */
1080                                nb_rep_rx_pkts++;
1081                        else if (rc == -ENOMEM) {
1082                                nb_rx_pkts++;
1083                                alloc_failed = true;
1084                        }
1085                } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
1086                        evt =
1087                        bnxt_event_hwrm_resp_handler(rxq->bp,
1088                                                     (struct cmpl_base *)rxcmp);
1089                        /* If the async event is Fatal error, return */
1090                        if (unlikely(is_bnxt_in_error(rxq->bp)))
1091                                goto done;
1092                }
1093
1094                raw_cons = NEXT_RAW_CMP(raw_cons);
1095                if (nb_rx_pkts == nb_pkts || nb_rep_rx_pkts == nb_pkts || evt)
1096                        break;
1097        }
1098
1099        if (!nb_rx_pkts && !nb_rep_rx_pkts && !evt) {
 1100                /*
 1101                 * There is no need for the PMD to keep re-arming the
 1102                 * doorbell if there are no new completions.
 1103                 */
1104                goto done;
1105        }
1106
1107        cpr->cp_raw_cons = raw_cons;
1108        /* Ring the completion queue doorbell. */
1109        bnxt_db_cq(cpr);
1110
1111        /* Ring the receive descriptor doorbell. */
1112        if (rx_raw_prod != rxr->rx_raw_prod)
1113                bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
1114
1115        /* Ring the AGG ring DB */
1116        if (ag_raw_prod != rxr->ag_raw_prod)
1117                bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
1118
1119        /* Attempt to alloc Rx buf in case of a previous allocation failure. */
1120        if (alloc_failed) {
1121                int cnt;
1122
1123                rx_raw_prod = RING_NEXT(rx_raw_prod);
1124                for (cnt = 0; cnt < nb_rx_pkts + nb_rep_rx_pkts; cnt++) {
1125                        struct rte_mbuf **rx_buf;
1126                        uint16_t ndx;
1127
1128                        ndx = RING_IDX(rxr->rx_ring_struct, rx_raw_prod + cnt);
1129                        rx_buf = &rxr->rx_buf_ring[ndx];
1130
1131                        /* Buffer already allocated for this index. */
1132                        if (*rx_buf != NULL && *rx_buf != &rxq->fake_mbuf)
1133                                continue;
1134
1135                        /* This slot is empty. Alloc buffer for Rx */
1136                        if (!bnxt_alloc_rx_data(rxq, rxr, rx_raw_prod + cnt)) {
1137                                rxr->rx_raw_prod = rx_raw_prod + cnt;
1138                                bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
1139                        } else {
 1140                                PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
1141                                break;
1142                        }
1143                }
1144        }
1145
1146done:
1147        return nb_rx_pkts;
1148}
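
/*
 * Illustrative sketch, not part of the driver: how an application reaches
 * this receive path.  bnxt_recv_pkts() is invoked through the generic
 * rte_eth_rx_burst() call; the burst size and the immediate free below are
 * assumptions for the example only.
 */
static __rte_unused void
bnxt_example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[32];
        uint16_t nb_rx, i;

        nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
        for (i = 0; i < nb_rx; i++) {
                /* application-specific processing would go here */
                rte_pktmbuf_free(pkts[i]);
        }
}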
1149
1150void bnxt_free_rx_rings(struct bnxt *bp)
1151{
1152        int i;
1153        struct bnxt_rx_queue *rxq;
1154
1155        if (!bp->rx_queues)
1156                return;
1157
1158        for (i = 0; i < (int)bp->rx_nr_rings; i++) {
1159                rxq = bp->rx_queues[i];
1160                if (!rxq)
1161                        continue;
1162
1163                bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
1164                rte_free(rxq->rx_ring->rx_ring_struct);
1165
1166                /* Free the Aggregator ring */
1167                bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
1168                rte_free(rxq->rx_ring->ag_ring_struct);
1169                rxq->rx_ring->ag_ring_struct = NULL;
1170
1171                rte_free(rxq->rx_ring);
1172
1173                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
1174                rte_free(rxq->cp_ring->cp_ring_struct);
1175                rte_free(rxq->cp_ring);
1176
1177                rte_memzone_free(rxq->mz);
1178                rxq->mz = NULL;
1179
1180                rte_free(rxq);
1181                bp->rx_queues[i] = NULL;
1182        }
1183}
1184
1185int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
1186{
1187        struct bnxt_cp_ring_info *cpr;
1188        struct bnxt_rx_ring_info *rxr;
1189        struct bnxt_ring *ring;
1190
1191        rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
1192
1193        if (rxq->rx_ring != NULL) {
1194                rxr = rxq->rx_ring;
1195        } else {
1196
1197                rxr = rte_zmalloc_socket("bnxt_rx_ring",
1198                                         sizeof(struct bnxt_rx_ring_info),
1199                                         RTE_CACHE_LINE_SIZE, socket_id);
1200                if (rxr == NULL)
1201                        return -ENOMEM;
1202                rxq->rx_ring = rxr;
1203        }
1204
1205        if (rxr->rx_ring_struct == NULL) {
1206                ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
1207                                           sizeof(struct bnxt_ring),
1208                                           RTE_CACHE_LINE_SIZE, socket_id);
1209                if (ring == NULL)
1210                        return -ENOMEM;
1211                rxr->rx_ring_struct = ring;
1212                ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
1213                ring->ring_mask = ring->ring_size - 1;
1214                ring->bd = (void *)rxr->rx_desc_ring;
1215                ring->bd_dma = rxr->rx_desc_mapping;
1216
1217                /* Allocate extra rx ring entries for vector rx. */
1218                ring->vmem_size = sizeof(struct rte_mbuf *) *
1219                                  (ring->ring_size + BNXT_RX_EXTRA_MBUF_ENTRIES);
1220
1221                ring->vmem = (void **)&rxr->rx_buf_ring;
1222                ring->fw_ring_id = INVALID_HW_RING_ID;
1223        }
1224
1225        if (rxq->cp_ring != NULL) {
1226                cpr = rxq->cp_ring;
1227        } else {
1228                cpr = rte_zmalloc_socket("bnxt_rx_ring",
1229                                         sizeof(struct bnxt_cp_ring_info),
1230                                         RTE_CACHE_LINE_SIZE, socket_id);
1231                if (cpr == NULL)
1232                        return -ENOMEM;
1233                rxq->cp_ring = cpr;
1234        }
1235
1236        if (cpr->cp_ring_struct == NULL) {
1237                ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
1238                                           sizeof(struct bnxt_ring),
1239                                           RTE_CACHE_LINE_SIZE, socket_id);
1240                if (ring == NULL)
1241                        return -ENOMEM;
1242                cpr->cp_ring_struct = ring;
1243
1244                /* Allocate two completion slots per entry in desc ring. */
1245                ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
1246                if (bnxt_need_agg_ring(rxq->bp->eth_dev))
1247                        ring->ring_size *= AGG_RING_SIZE_FACTOR;
1248
1249                ring->ring_size = rte_align32pow2(ring->ring_size);
1250                ring->ring_mask = ring->ring_size - 1;
1251                ring->bd = (void *)cpr->cp_desc_ring;
1252                ring->bd_dma = cpr->cp_desc_mapping;
1253                ring->vmem_size = 0;
1254                ring->vmem = NULL;
1255                ring->fw_ring_id = INVALID_HW_RING_ID;
1256        }
1257
1258        if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
1259                return 0;
1260
1261        rxr = rxq->rx_ring;
1262        /* Allocate Aggregator rings */
1263        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
1264                                   sizeof(struct bnxt_ring),
1265                                   RTE_CACHE_LINE_SIZE, socket_id);
1266        if (ring == NULL)
1267                return -ENOMEM;
1268        rxr->ag_ring_struct = ring;
1269        ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
1270                                          AGG_RING_SIZE_FACTOR);
1271        ring->ring_mask = ring->ring_size - 1;
1272        ring->bd = (void *)rxr->ag_desc_ring;
1273        ring->bd_dma = rxr->ag_desc_mapping;
1274        ring->vmem_size = ring->ring_size * sizeof(struct rte_mbuf *);
1275        ring->vmem = (void **)&rxr->ag_buf_ring;
1276        ring->fw_ring_id = INVALID_HW_RING_ID;
1277
1278        return 0;
1279}
1280
1281static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
1282                            uint16_t len)
1283{
1284        uint32_t j;
1285        struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;
1286
1287        if (!rx_bd_ring)
1288                return;
1289        for (j = 0; j < ring->ring_size; j++) {
1290                rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
1291                rx_bd_ring[j].len = rte_cpu_to_le_16(len);
1292                rx_bd_ring[j].opaque = j;
1293        }
1294}
1295
1296int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
1297{
1298        struct bnxt_rx_ring_info *rxr;
1299        struct bnxt_ring *ring;
1300        uint32_t raw_prod, type;
1301        unsigned int i;
1302        uint16_t size;
1303
1304        /* Initialize packet type table. */
1305        bnxt_init_ptype_table();
1306
1307        size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
1308        size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
1309
1310        type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
1311
1312        rxr = rxq->rx_ring;
1313        ring = rxr->rx_ring_struct;
1314        bnxt_init_rxbds(ring, type, size);
1315
1316        /* Initialize offload flags parsing table. */
1317        bnxt_init_ol_flags_tables(rxq);
1318
1319        raw_prod = rxr->rx_raw_prod;
1320        for (i = 0; i < ring->ring_size; i++) {
1321                if (unlikely(!rxr->rx_buf_ring[i])) {
1322                        if (bnxt_alloc_rx_data(rxq, rxr, raw_prod) != 0) {
1323                                PMD_DRV_LOG(WARNING,
1324                                            "RxQ %d allocated %d of %d mbufs\n",
1325                                            rxq->queue_id, i, ring->ring_size);
1326                                return -ENOMEM;
1327                        }
1328                }
1329                rxr->rx_raw_prod = raw_prod;
1330                raw_prod = RING_NEXT(raw_prod);
1331        }
1332
1333        /* Initialize dummy mbuf pointers for vector mode rx. */
1334        for (i = ring->ring_size;
1335             i < ring->ring_size + BNXT_RX_EXTRA_MBUF_ENTRIES; i++) {
1336                rxr->rx_buf_ring[i] = &rxq->fake_mbuf;
1337        }
1338
1339        /* Explicitly reset this driver internal tracker on a ring init */
1340        rxr->rx_next_cons = 0;
1341
1342        if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
1343                return 0;
1344
1345        ring = rxr->ag_ring_struct;
1346        type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
1347        bnxt_init_rxbds(ring, type, size);
1348        raw_prod = rxr->ag_raw_prod;
1349
1350        for (i = 0; i < ring->ring_size; i++) {
1351                if (unlikely(!rxr->ag_buf_ring[i])) {
1352                        if (bnxt_alloc_ag_data(rxq, rxr, raw_prod) != 0) {
1353                                PMD_DRV_LOG(WARNING,
1354                                            "RxQ %d allocated %d of %d mbufs\n",
1355                                            rxq->queue_id, i, ring->ring_size);
1356                                return -ENOMEM;
1357                        }
1358                }
1359                rxr->ag_raw_prod = raw_prod;
1360                raw_prod = RING_NEXT(raw_prod);
1361        }
1362        PMD_DRV_LOG(DEBUG, "AGG Done!\n");
1363
1364        if (rxr->tpa_info) {
1365                unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);
1366
1367                for (i = 0; i < max_aggs; i++) {
1368                        if (unlikely(!rxr->tpa_info[i].mbuf)) {
1369                                rxr->tpa_info[i].mbuf =
1370                                        __bnxt_alloc_rx_data(rxq->mb_pool);
1371                                if (!rxr->tpa_info[i].mbuf) {
1372                                        __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
1373                                                        __ATOMIC_RELAXED);
1374                                        return -ENOMEM;
1375                                }
1376                        }
1377                }
1378        }
1379        PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");
1380
1381        return 0;
1382}
1383
 1384/* Sweep the Rx completion queue until the HWRM_DONE completion for the
 1385 * ring flush is received. The mbufs are not freed in this call; they are
 1386 * freed during ring free as part of memory cleanup.
 1387 */
1388int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr)
1389{
1390        struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
1391        uint32_t ring_mask = cp_ring_struct->ring_mask;
1392        uint32_t raw_cons = cpr->cp_raw_cons;
1393        struct rx_pkt_cmpl *rxcmp;
1394        uint32_t nb_rx = 0;
1395        uint32_t cons;
1396
1397        do {
1398                cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
1399                rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1400
1401                if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, ring_mask + 1))
1402                        break;
1403
1404                if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE)
1405                        return 1;
1406
1407                raw_cons = NEXT_RAW_CMP(raw_cons);
1408                nb_rx++;
1409        } while (nb_rx < ring_mask);
1410
1411        cpr->cp_raw_cons = raw_cons;
1412
1413        /* Ring the completion queue doorbell. */
1414        bnxt_db_cq(cpr);
1415
1416        return 0;
1417}
1418