dpdk/drivers/net/octeontx2/otx2_ethdev.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(C) 2019 Marvell International Ltd.
   3 */
   4
   5#include <inttypes.h>
   6
   7#include <ethdev_pci.h>
   8#include <rte_io.h>
   9#include <rte_malloc.h>
  10#include <rte_mbuf.h>
  11#include <rte_mbuf_pool_ops.h>
  12#include <rte_mempool.h>
  13
  14#include "otx2_ethdev.h"
  15#include "otx2_ethdev_sec.h"
  16
  17static inline uint64_t
  18nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
  19{
  20        uint64_t capa = NIX_RX_OFFLOAD_CAPA;
  21
  22        if (otx2_dev_is_vf(dev) ||
  23            dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
  24                capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
  25
  26        return capa;
  27}
  28
  29static inline uint64_t
  30nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
  31{
  32        uint64_t capa = NIX_TX_OFFLOAD_CAPA;
  33
  34        /* TSO not supported for earlier chip revisions */
  35        if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
  36                capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
  37                          RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
  38                          RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
  39                          RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
  40        return capa;
  41}
  42
  43static const struct otx2_dev_ops otx2_dev_ops = {
  44        .link_status_update = otx2_eth_dev_link_status_update,
  45        .ptp_info_update = otx2_eth_dev_ptp_info_update,
  46        .link_status_get = otx2_eth_dev_link_status_get,
  47};
  48
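/*
 * Allocate a NIX LF through the AF mailbox. The request carries the RQ/SQ/CQ
 * counts, RSS sizing and the per-packet Rx parse configuration (checksum and
 * length checks); the response returns hardware parameters (SQB size, Rx/Tx
 * channel base/count, LSO format indices, interrupt counts) that are cached
 * in the device structure for later queue and TM setup.
 */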
  49static int
  50nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
  51{
  52        struct otx2_mbox *mbox = dev->mbox;
  53        struct nix_lf_alloc_req *req;
  54        struct nix_lf_alloc_rsp *rsp;
  55        int rc;
  56
  57        req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
  58        req->rq_cnt = nb_rxq;
  59        req->sq_cnt = nb_txq;
  60        req->cq_cnt = nb_rxq;
   61        /* XQE_SZ should be in sync with NIX_CQ_ENTRY_SZ */
  62        RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
  63        req->xqe_sz = NIX_XQESZ_W16;
  64        req->rss_sz = dev->rss_info.rss_size;
  65        req->rss_grps = NIX_RSS_GRPS;
  66        req->npa_func = otx2_npa_pf_func_get();
  67        req->sso_func = otx2_sso_pf_func_get();
  68        req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
  69        if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
  70                         RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
  71                req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
  72                req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
  73        }
  74        req->rx_cfg |= (BIT_ULL(32 /* DROP_RE */)             |
  75                        BIT_ULL(33 /* Outer L2 Length */)     |
  76                        BIT_ULL(38 /* Inner L4 UDP Length */) |
  77                        BIT_ULL(39 /* Inner L3 Length */)     |
  78                        BIT_ULL(40 /* Outer L4 UDP Length */) |
  79                        BIT_ULL(41 /* Outer L3 Length */));
  80
  81        if (dev->rss_tag_as_xor == 0)
  82                req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
  83
  84        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
  85        if (rc)
  86                return rc;
  87
  88        dev->sqb_size = rsp->sqb_size;
  89        dev->tx_chan_base = rsp->tx_chan_base;
  90        dev->rx_chan_base = rsp->rx_chan_base;
  91        dev->rx_chan_cnt = rsp->rx_chan_cnt;
  92        dev->tx_chan_cnt = rsp->tx_chan_cnt;
  93        dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
  94        dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
  95        dev->lf_tx_stats = rsp->lf_tx_stats;
  96        dev->lf_rx_stats = rsp->lf_rx_stats;
  97        dev->cints = rsp->cints;
  98        dev->qints = rsp->qints;
  99        dev->npc_flow.channel = dev->rx_chan_base;
 100        dev->ptp_en = rsp->hw_rx_tstamp_en;
 101
 102        return 0;
 103}
 104
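/*
 * When a non-default switch header type (HiGig2, EXDSA, VLAN EXDSA or the
 * 90B/24B channel-length headers) is configured, tell the AF to use the
 * matching custom Rx (and, where applicable, Tx) port kinds (pkinds) so NPC
 * parses the extra header correctly. Disabling restores the default pkind
 * in both directions.
 */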
 105static int
 106nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
 107{
 108        struct otx2_mbox *mbox = dev->mbox;
 109        struct npc_set_pkind *req;
 110        struct msg_resp *rsp;
 111        int rc;
 112
 113        if (dev->npc_flow.switch_header_type == 0)
 114                return 0;
 115
 116        /* Notify AF about higig2 config */
 117        req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
 118        req->mode = dev->npc_flow.switch_header_type;
 119        if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
 120                req->mode = OTX2_PRIV_FLAGS_CUSTOM;
 121                req->pkind = NPC_RX_CHLEN90B_PKIND;
 122        } else if (dev->npc_flow.switch_header_type ==
 123                   OTX2_PRIV_FLAGS_CH_LEN_24B) {
 124                req->mode = OTX2_PRIV_FLAGS_CUSTOM;
 125                req->pkind = NPC_RX_CHLEN24B_PKIND;
 126        } else if (dev->npc_flow.switch_header_type ==
 127                   OTX2_PRIV_FLAGS_EXDSA) {
 128                req->mode = OTX2_PRIV_FLAGS_CUSTOM;
 129                req->pkind = NPC_RX_EXDSA_PKIND;
 130        } else if (dev->npc_flow.switch_header_type ==
 131                   OTX2_PRIV_FLAGS_VLAN_EXDSA) {
 132                req->mode = OTX2_PRIV_FLAGS_CUSTOM;
 133                req->pkind = NPC_RX_VLAN_EXDSA_PKIND;
 134        }
 135
 136        if (enable == 0)
 137                req->mode = OTX2_PRIV_FLAGS_DEFAULT;
 138        req->dir = PKIND_RX;
 139        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
 140        if (rc)
 141                return rc;
 142        req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
 143        req->mode = dev->npc_flow.switch_header_type;
 144        if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B ||
 145            dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_24B)
 146                req->mode = OTX2_PRIV_FLAGS_DEFAULT;
 147
 148        if (enable == 0)
 149                req->mode = OTX2_PRIV_FLAGS_DEFAULT;
 150        req->dir = PKIND_TX;
 151        return otx2_mbox_process_msg(mbox, (void *)&rsp);
 152}
 153
 154static int
 155nix_lf_free(struct otx2_eth_dev *dev)
 156{
 157        struct otx2_mbox *mbox = dev->mbox;
 158        struct nix_lf_free_req *req;
 159        struct ndc_sync_op *ndc_req;
 160        int rc;
 161
 162        /* Sync NDC-NIX for LF */
 163        ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
 164        ndc_req->nix_lf_tx_sync = 1;
 165        ndc_req->nix_lf_rx_sync = 1;
 166        rc = otx2_mbox_process(mbox);
 167        if (rc)
 168                otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);
 169
 170        req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
  171        /* Let the AF driver free all of this NIX LF's
  172         * NPC entries allocated using the NPC mailbox.
  173         */
 174        req->flags = 0;
 175
 176        return otx2_mbox_process(mbox);
 177}
 178
 179int
 180otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
 181{
 182        struct otx2_mbox *mbox = dev->mbox;
 183
 184        if (otx2_dev_is_vf_or_sdp(dev))
 185                return 0;
 186
 187        otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
 188
 189        return otx2_mbox_process(mbox);
 190}
 191
 192int
 193otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
 194{
 195        struct otx2_mbox *mbox = dev->mbox;
 196
 197        if (otx2_dev_is_vf_or_sdp(dev))
 198                return 0;
 199
 200        otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
 201
 202        return otx2_mbox_process(mbox);
 203}
 204
 205static int
 206npc_rx_enable(struct otx2_eth_dev *dev)
 207{
 208        struct otx2_mbox *mbox = dev->mbox;
 209
 210        otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
 211
 212        return otx2_mbox_process(mbox);
 213}
 214
 215static int
 216npc_rx_disable(struct otx2_eth_dev *dev)
 217{
 218        struct otx2_mbox *mbox = dev->mbox;
 219
 220        otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
 221
 222        return otx2_mbox_process(mbox);
 223}
 224
 225static int
 226nix_cgx_start_link_event(struct otx2_eth_dev *dev)
 227{
 228        struct otx2_mbox *mbox = dev->mbox;
 229
 230        if (otx2_dev_is_vf_or_sdp(dev))
 231                return 0;
 232
 233        otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
 234
 235        return otx2_mbox_process(mbox);
 236}
 237
 238static int
 239cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
 240{
 241        struct otx2_mbox *mbox = dev->mbox;
 242
 243        if (en && otx2_dev_is_vf_or_sdp(dev))
 244                return -ENOTSUP;
 245
 246        if (en)
 247                otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
 248        else
 249                otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
 250
 251        return otx2_mbox_process(mbox);
 252}
 253
 254static int
 255nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
 256{
 257        struct otx2_mbox *mbox = dev->mbox;
 258
 259        if (otx2_dev_is_vf_or_sdp(dev))
 260                return 0;
 261
 262        otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
 263
 264        return otx2_mbox_process(mbox);
 265}
 266
 267static inline void
 268nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
 269{
 270        rxq->head = 0;
 271        rxq->available = 0;
 272}
 273
 274static inline uint32_t
 275nix_qsize_to_val(enum nix_q_size_e qsize)
 276{
 277        return (16UL << (qsize * 2));
 278}
 279
 280static inline enum nix_q_size_e
 281nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
 282{
 283        int i;
 284
 285        if (otx2_ethdev_fixup_is_min_4k_q(dev))
 286                i = nix_q_size_4K;
 287        else
 288                i = nix_q_size_16;
 289
 290        for (; i < nix_q_size_max; i++)
 291                if (val <= nix_qsize_to_val(i))
 292                        break;
 293
 294        if (i >= nix_q_size_max)
 295                i = nix_q_size_max - 1;
 296
 297        return i;
 298}
 299
 300static int
 301nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 302               uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
 303{
 304        struct otx2_mbox *mbox = dev->mbox;
 305        const struct rte_memzone *rz;
 306        uint32_t ring_size, cq_size;
 307        struct nix_aq_enq_req *aq;
 308        uint16_t first_skip;
 309        int rc;
 310
 311        cq_size = rxq->qlen;
 312        ring_size = cq_size * NIX_CQ_ENTRY_SZ;
 313        rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
 314                                      NIX_CQ_ALIGN, dev->node);
 315        if (rz == NULL) {
 316                otx2_err("Failed to allocate mem for cq hw ring");
 317                return -ENOMEM;
 318        }
 319        memset(rz->addr, 0, rz->len);
 320        rxq->desc = (uintptr_t)rz->addr;
 321        rxq->qmask = cq_size - 1;
 322
 323        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 324        aq->qidx = qid;
 325        aq->ctype = NIX_AQ_CTYPE_CQ;
 326        aq->op = NIX_AQ_INSTOP_INIT;
 327
 328        aq->cq.ena = 1;
 329        aq->cq.caching = 1;
 330        aq->cq.qsize = rxq->qsize;
 331        aq->cq.base = rz->iova;
 332        aq->cq.avg_level = 0xff;
 333        aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
 334        aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
 335
 336        /* Many to one reduction */
 337        aq->cq.qint_idx = qid % dev->qints;
 338        /* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
 339        aq->cq.cint_idx = qid;
 340
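        /* On parts with the CQ-full errata, derive an early drop level from
         * NIX_CQ_FULL_ERRATA_SKID so the CQ starts dropping before it can be
         * completely filled; otherwise use the default NIX_CQ_THRESH_LEVEL.
         * The same level is reused below as the backpressure (bp) threshold.
         */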
 341        if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
 342                const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
 343                uint16_t min_rx_drop;
 344
 345                min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
 346                aq->cq.drop = min_rx_drop;
 347                aq->cq.drop_ena = 1;
 348                rxq->cq_drop = min_rx_drop;
 349        } else {
 350                rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
 351                aq->cq.drop = rxq->cq_drop;
 352                aq->cq.drop_ena = 1;
 353        }
 354
 355        /* TX pause frames enable flowctrl on RX side */
 356        if (dev->fc_info.tx_pause) {
 357                /* Single bpid is allocated for all rx channels for now */
 358                aq->cq.bpid = dev->fc_info.bpid[0];
 359                aq->cq.bp = rxq->cq_drop;
 360                aq->cq.bp_ena = 1;
 361        }
 362
 363        rc = otx2_mbox_process(mbox);
 364        if (rc) {
 365                otx2_err("Failed to init cq context");
 366                return rc;
 367        }
 368
 369        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 370        aq->qidx = qid;
 371        aq->ctype = NIX_AQ_CTYPE_RQ;
 372        aq->op = NIX_AQ_INSTOP_INIT;
 373
 374        aq->rq.sso_ena = 0;
 375
 376        if (rxq->offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 377                aq->rq.ipsech_ena = 1;
 378
 379        aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
 380        aq->rq.spb_ena = 0;
 381        aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
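        /* first_skip is the area the NIX skips at the start of each buffer
         * before writing packet data: the mbuf header, headroom and private
         * area. It is programmed to hardware in units of 8-byte words, while
         * later_skip covers only the mbuf header for chained segments.
         */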
 382        first_skip = (sizeof(struct rte_mbuf));
 383        first_skip += RTE_PKTMBUF_HEADROOM;
 384        first_skip += rte_pktmbuf_priv_size(mp);
 385        rxq->data_off = first_skip;
 386
 387        first_skip /= 8; /* Expressed in number of dwords */
 388        aq->rq.first_skip = first_skip;
 389        aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
 390        aq->rq.flow_tagw = 32; /* 32-bits */
 391        aq->rq.lpb_sizem1 = mp->elt_size / 8;
 392        aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
 393        aq->rq.ena = 1;
 394        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
 395        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 396        aq->rq.rq_int_ena = 0;
 397        /* Many to one reduction */
 398        aq->rq.qint_idx = qid % dev->qints;
 399
 400        aq->rq.xqe_drop_ena = 1;
 401
 402        rc = otx2_mbox_process(mbox);
 403        if (rc) {
 404                otx2_err("Failed to init rq context");
 405                return rc;
 406        }
 407
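        /* When lock_rx_ctx is set, issue AQ LOCK ops to pin the CQ and RQ
         * contexts in NDC. The second message allocation below can fail when
         * the shared mailbox region is full, in which case the queued
         * messages are flushed and the allocation retried.
         */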
 408        if (dev->lock_rx_ctx) {
 409                aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 410                aq->qidx = qid;
 411                aq->ctype = NIX_AQ_CTYPE_CQ;
 412                aq->op = NIX_AQ_INSTOP_LOCK;
 413
 414                aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 415                if (!aq) {
 416                        /* The shared memory buffer can be full.
 417                         * Flush it and retry
 418                         */
 419                        otx2_mbox_msg_send(mbox, 0);
 420                        rc = otx2_mbox_wait_for_rsp(mbox, 0);
 421                        if (rc < 0) {
 422                                otx2_err("Failed to LOCK cq context");
 423                                return rc;
 424                        }
 425
 426                        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 427                        if (!aq) {
 428                                otx2_err("Failed to LOCK rq context");
 429                                return -ENOMEM;
 430                        }
 431                }
 432                aq->qidx = qid;
 433                aq->ctype = NIX_AQ_CTYPE_RQ;
 434                aq->op = NIX_AQ_INSTOP_LOCK;
 435                rc = otx2_mbox_process(mbox);
 436                if (rc < 0) {
 437                        otx2_err("Failed to LOCK rq context");
 438                        return rc;
 439                }
 440        }
 441
 442        return 0;
 443}
 444
 445static int
 446nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
 447               struct otx2_eth_rxq *rxq, const bool enb)
 448{
 449        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
 450        struct otx2_mbox *mbox = dev->mbox;
 451        struct nix_aq_enq_req *aq;
 452
 453        /* Pkts will be dropped silently if RQ is disabled */
 454        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 455        aq->qidx = rxq->rq;
 456        aq->ctype = NIX_AQ_CTYPE_RQ;
 457        aq->op = NIX_AQ_INSTOP_WRITE;
 458
 459        aq->rq.ena = enb;
 460        aq->rq_mask.ena = ~(aq->rq_mask.ena);
 461
 462        return otx2_mbox_process(mbox);
 463}
 464
 465static int
 466nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
 467{
 468        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
 469        struct otx2_mbox *mbox = dev->mbox;
 470        struct nix_aq_enq_req *aq;
 471        int rc;
 472
 473        /* RQ is already disabled */
 474        /* Disable CQ */
 475        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 476        aq->qidx = rxq->rq;
 477        aq->ctype = NIX_AQ_CTYPE_CQ;
 478        aq->op = NIX_AQ_INSTOP_WRITE;
 479
 480        aq->cq.ena = 0;
 481        aq->cq_mask.ena = ~(aq->cq_mask.ena);
 482
 483        rc = otx2_mbox_process(mbox);
 484        if (rc < 0) {
 485                otx2_err("Failed to disable cq context");
 486                return rc;
 487        }
 488
 489        if (dev->lock_rx_ctx) {
 490                aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 491                aq->qidx = rxq->rq;
 492                aq->ctype = NIX_AQ_CTYPE_CQ;
 493                aq->op = NIX_AQ_INSTOP_UNLOCK;
 494
 495                aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 496                if (!aq) {
 497                        /* The shared memory buffer can be full.
 498                         * Flush it and retry
 499                         */
 500                        otx2_mbox_msg_send(mbox, 0);
 501                        rc = otx2_mbox_wait_for_rsp(mbox, 0);
 502                        if (rc < 0) {
 503                                otx2_err("Failed to UNLOCK cq context");
 504                                return rc;
 505                        }
 506
 507                        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 508                        if (!aq) {
 509                                otx2_err("Failed to UNLOCK rq context");
 510                                return -ENOMEM;
 511                        }
 512                }
 513                aq->qidx = rxq->rq;
 514                aq->ctype = NIX_AQ_CTYPE_RQ;
 515                aq->op = NIX_AQ_INSTOP_UNLOCK;
 516                rc = otx2_mbox_process(mbox);
 517                if (rc < 0) {
 518                        otx2_err("Failed to UNLOCK rq context");
 519                        return rc;
 520                }
 521        }
 522
 523        return 0;
 524}
 525
 526static inline int
 527nix_get_data_off(struct otx2_eth_dev *dev)
 528{
 529        return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0;
 530}
 531
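/*
 * Precompute the 64-bit mbuf "rearm data" word (data_off, refcnt, nb_segs,
 * port) for this port. The Rx fast path uses it to initialize those four
 * fields of every received mbuf with a single 8-byte store, which is why the
 * field offsets are pinned by the build-time assertions below.
 */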
 532uint64_t
 533otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
 534{
 535        struct rte_mbuf mb_def;
 536        uint64_t *tmp;
 537
 538        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
 539        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
 540                                offsetof(struct rte_mbuf, data_off) != 2);
 541        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
 542                                offsetof(struct rte_mbuf, data_off) != 4);
 543        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
 544                                offsetof(struct rte_mbuf, data_off) != 6);
 545        mb_def.nb_segs = 1;
 546        mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
 547        mb_def.port = port_id;
 548        rte_mbuf_refcnt_set(&mb_def, 1);
 549
 550        /* Prevent compiler reordering: rearm_data covers previous fields */
 551        rte_compiler_barrier();
 552        tmp = (uint64_t *)&mb_def.rearm_data;
 553
 554        return *tmp;
 555}
 556
 557static void
 558otx2_nix_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 559{
 560        struct otx2_eth_rxq *rxq = dev->data->rx_queues[qid];
 561
 562        if (!rxq)
 563                return;
 564
 565        otx2_nix_dbg("Releasing rxq %u", rxq->rq);
 566        nix_cq_rq_uninit(rxq->eth_dev, rxq);
 567        rte_free(rxq);
 568        dev->data->rx_queues[qid] = NULL;
 569}
 570
 571static int
 572otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
 573                        uint16_t nb_desc, unsigned int socket,
 574                        const struct rte_eth_rxconf *rx_conf,
 575                        struct rte_mempool *mp)
 576{
 577        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
 578        struct rte_mempool_ops *ops;
 579        struct otx2_eth_rxq *rxq;
 580        const char *platform_ops;
 581        enum nix_q_size_e qsize;
 582        uint64_t offloads;
 583        int rc;
 584
 585        rc = -EINVAL;
 586
  587        /* Compile-time check to make sure all fast path elements fit in a cache line */
 588        RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);
 589
 590        /* Sanity checks */
 591        if (rx_conf->rx_deferred_start == 1) {
 592                otx2_err("Deferred Rx start is not supported");
 593                goto fail;
 594        }
 595
 596        platform_ops = rte_mbuf_platform_mempool_ops();
 597        /* This driver needs octeontx2_npa mempool ops to work */
 598        ops = rte_mempool_get_ops(mp->ops_index);
 599        if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
 600                otx2_err("mempool ops should be of octeontx2_npa type");
 601                goto fail;
 602        }
 603
 604        if (mp->pool_id == 0) {
 605                otx2_err("Invalid pool_id");
 606                goto fail;
 607        }
 608
 609        /* Free memory prior to re-allocation if needed */
 610        if (eth_dev->data->rx_queues[rq] != NULL) {
 611                otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
 612                otx2_nix_rx_queue_release(eth_dev, rq);
 613                rte_eth_dma_zone_free(eth_dev, "cq", rq);
 614        }
 615
 616        offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
 617        dev->rx_offloads |= offloads;
 618
 619        /* Find the CQ queue size */
 620        qsize = nix_qsize_clampup_get(dev, nb_desc);
 621        /* Allocate rxq memory */
 622        rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
 623        if (rxq == NULL) {
 624                otx2_err("Failed to allocate rq=%d", rq);
 625                rc = -ENOMEM;
 626                goto fail;
 627        }
 628
 629        rxq->eth_dev = eth_dev;
 630        rxq->rq = rq;
 631        rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
 632        rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
 633        rxq->wdata = (uint64_t)rq << 32;
 634        rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
 635        rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
 636                                                        eth_dev->data->port_id);
 637        rxq->offloads = offloads;
 638        rxq->pool = mp;
 639        rxq->qlen = nix_qsize_to_val(qsize);
 640        rxq->qsize = qsize;
 641        rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
 642        rxq->tstamp = &dev->tstamp;
 643
 644        eth_dev->data->rx_queues[rq] = rxq;
 645
 646        /* Alloc completion queue */
 647        rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
 648        if (rc) {
 649                otx2_err("Failed to allocate rxq=%u", rq);
 650                goto free_rxq;
 651        }
 652
 653        rxq->qconf.socket_id = socket;
 654        rxq->qconf.nb_desc = nb_desc;
 655        rxq->qconf.mempool = mp;
 656        memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));
 657
 658        nix_rx_queue_reset(rxq);
 659        otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
 660                     rq, mp->name, qsize, nb_desc, rxq->qlen);
 661
 662        eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
 663
  664        /* Calculate the delta and freq multiplier between the PTP HI clock
  665         * and the TSC; these are needed to derive the raw clock value from
  666         * the TSC counter. The read_clock eth op returns the raw clock.
  667         */
 668        if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
 669            otx2_ethdev_is_ptp_en(dev)) {
 670                rc = otx2_nix_raw_clock_tsc_conv(dev);
 671                if (rc) {
 672                        otx2_err("Failed to calculate delta and freq mult");
 673                        goto fail;
 674                }
 675        }
 676
 677        /* Setup scatter mode if needed by jumbo */
 678        otx2_nix_enable_mseg_on_jumbo(rxq);
 679
 680        return 0;
 681
 682free_rxq:
 683        otx2_nix_rx_queue_release(eth_dev, rq);
 684fail:
 685        return rc;
 686}
 687
 688static inline uint8_t
 689nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
 690{
  691        /*
  692         * A maximum of three segments can be supported with W8; choose
  693         * NIX_MAXSQESZ_W16 for multi-segment offload.
  694         */
 695        if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 696                return NIX_MAXSQESZ_W16;
 697        else
 698                return NIX_MAXSQESZ_W8;
 699}
 700
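/*
 * Translate the ethdev Rx offload bits into the compact NIX_RX_OFFLOAD_*
 * flag word kept in dev->rx_offload_flags; the fast path uses these flags
 * when selecting the Rx burst variant.
 */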
 701static uint16_t
 702nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 703{
 704        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
 705        struct rte_eth_dev_data *data = eth_dev->data;
 706        struct rte_eth_conf *conf = &data->dev_conf;
 707        struct rte_eth_rxmode *rxmode = &conf->rxmode;
 708        uint16_t flags = 0;
 709
 710        if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
 711                        (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 712                flags |= NIX_RX_OFFLOAD_RSS_F;
 713
 714        if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
 715                         RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
 716                flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 717
 718        if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
 719                                RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 720                flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 721
 722        if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 723                flags |= NIX_RX_MULTI_SEG_F;
 724
 725        if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
 726                                RTE_ETH_RX_OFFLOAD_QINQ_STRIP))
 727                flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 728
 729        if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 730                flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 731
 732        if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
 733                flags |= NIX_RX_OFFLOAD_SECURITY_F;
 734
 735        if (!dev->ptype_disable)
 736                flags |= NIX_RX_OFFLOAD_PTYPE_F;
 737
 738        return flags;
 739}
 740
 741static uint16_t
 742nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 743{
 744        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
 745        uint64_t conf = dev->tx_offloads;
 746        uint16_t flags = 0;
 747
 748        /* Fastpath is dependent on these enums */
 749        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
 750        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
 751        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
 752        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
 753        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
 754        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
 755        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
 756        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
 757        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
 758        RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
 759        RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
 760        RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
 761        RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
 762        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
 763                         offsetof(struct rte_mbuf, buf_iova) + 8);
 764        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
 765                         offsetof(struct rte_mbuf, buf_iova) + 16);
 766        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
 767                         offsetof(struct rte_mbuf, ol_flags) + 12);
 768        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
 769                         offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 770
 771        if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
 772            conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
 773                flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 774
 775        if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
 776            conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
 777                flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 778
 779        if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
 780            conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
 781            conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
 782            conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
 783                flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 784
 785        if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 786                flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 787
 788        if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 789                flags |= NIX_TX_MULTI_SEG_F;
 790
 791        /* Enable Inner checksum for TSO */
 792        if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 793                flags |= (NIX_TX_OFFLOAD_TSO_F |
 794                          NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 795
 796        /* Enable Inner and Outer checksum for Tunnel TSO */
 797        if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
 798                    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
 799                    RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
 800                flags |= (NIX_TX_OFFLOAD_TSO_F |
 801                          NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 802                          NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 803
 804        if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 805                flags |= NIX_TX_OFFLOAD_SECURITY_F;
 806
 807        if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
 808                flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 809
 810        return flags;
 811}
 812
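/*
 * Lock (or unlock, below) the SQB pool's AURA and POOL contexts in the NPA
 * NDC. Two AQ messages are queued back to back; if the second allocation
 * fails because the shared mailbox region is full, the pending messages are
 * flushed and the allocation is retried before completing the request.
 */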
 813static int
 814nix_sqb_lock(struct rte_mempool *mp)
 815{
 816        struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
 817        struct npa_aq_enq_req *req;
 818        int rc;
 819
 820        req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
 821        req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
 822        req->ctype = NPA_AQ_CTYPE_AURA;
 823        req->op = NPA_AQ_INSTOP_LOCK;
 824
 825        req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
 826        if (!req) {
 827                /* The shared memory buffer can be full.
 828                 * Flush it and retry
 829                 */
 830                otx2_mbox_msg_send(npa_lf->mbox, 0);
 831                rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
 832                if (rc < 0) {
 833                        otx2_err("Failed to LOCK AURA context");
 834                        return rc;
 835                }
 836
 837                req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
 838                if (!req) {
 839                        otx2_err("Failed to LOCK POOL context");
 840                        return -ENOMEM;
 841                }
 842        }
 843
 844        req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
 845        req->ctype = NPA_AQ_CTYPE_POOL;
 846        req->op = NPA_AQ_INSTOP_LOCK;
 847
 848        rc = otx2_mbox_process(npa_lf->mbox);
 849        if (rc < 0) {
 850                otx2_err("Unable to lock POOL in NDC");
 851                return rc;
 852        }
 853
 854        return 0;
 855}
 856
 857static int
 858nix_sqb_unlock(struct rte_mempool *mp)
 859{
 860        struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
 861        struct npa_aq_enq_req *req;
 862        int rc;
 863
 864        req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
 865        req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
 866        req->ctype = NPA_AQ_CTYPE_AURA;
 867        req->op = NPA_AQ_INSTOP_UNLOCK;
 868
 869        req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
 870        if (!req) {
 871                /* The shared memory buffer can be full.
 872                 * Flush it and retry
 873                 */
 874                otx2_mbox_msg_send(npa_lf->mbox, 0);
 875                rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
 876                if (rc < 0) {
 877                        otx2_err("Failed to UNLOCK AURA context");
 878                        return rc;
 879                }
 880
 881                req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
 882                if (!req) {
 883                        otx2_err("Failed to UNLOCK POOL context");
 884                        return -ENOMEM;
 885                }
 886        }
 887        req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
 888        req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
 889        req->ctype = NPA_AQ_CTYPE_POOL;
 890        req->op = NPA_AQ_INSTOP_UNLOCK;
 891
 892        rc = otx2_mbox_process(npa_lf->mbox);
 893        if (rc < 0) {
 894                otx2_err("Unable to UNLOCK AURA in NDC");
 895                return rc;
 896        }
 897
 898        return 0;
 899}
 900
 901void
 902otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
 903{
 904        struct rte_pktmbuf_pool_private *mbp_priv;
 905        struct rte_eth_dev *eth_dev;
 906        struct otx2_eth_dev *dev;
 907        uint32_t buffsz;
 908
 909        eth_dev = rxq->eth_dev;
 910        dev = otx2_eth_pmd_priv(eth_dev);
 911
 912        /* Get rx buffer size */
 913        mbp_priv = rte_mempool_get_priv(rxq->pool);
 914        buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 915
 916        if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
 917                dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 918                dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 919
  920                /* Recompute the rx/tx offload_flags since the
  921                 * rx/tx offloads have changed.
  922                 */
 923                dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
 924                dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
 925        }
 926}
 927
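/*
 * Initialize the SQ context: bind the SQ to the SMQ and RR quantum obtained
 * from the traffic manager leaf node, point it at the SQB aura, pick the SQE
 * store type based on the max SQE size, and enable the SQ error interrupts.
 * The context is optionally locked in NDC when lock_tx_ctx is set.
 */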
 928static int
 929nix_sq_init(struct otx2_eth_txq *txq)
 930{
 931        struct otx2_eth_dev *dev = txq->dev;
 932        struct otx2_mbox *mbox = dev->mbox;
 933        struct nix_aq_enq_req *sq;
 934        uint32_t rr_quantum;
 935        uint16_t smq;
 936        int rc;
 937
 938        if (txq->sqb_pool->pool_id == 0)
 939                return -EINVAL;
 940
 941        rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
 942        if (rc) {
 943                otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
 944                return rc;
 945        }
 946
 947        sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 948        sq->qidx = txq->sq;
 949        sq->ctype = NIX_AQ_CTYPE_SQ;
 950        sq->op = NIX_AQ_INSTOP_INIT;
 951        sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);
 952
 953        sq->sq.smq = smq;
 954        sq->sq.smq_rr_quantum = rr_quantum;
 955        sq->sq.default_chan = dev->tx_chan_base;
 956        sq->sq.sqe_stype = NIX_STYPE_STF;
 957        sq->sq.ena = 1;
 958        if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 959                sq->sq.sqe_stype = NIX_STYPE_STP;
 960        sq->sq.sqb_aura =
 961                npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
 962        sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
 963        sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
 964        sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
 965        sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
 966
 967        /* Many to one reduction */
 968        sq->sq.qint_idx = txq->sq % dev->qints;
 969
 970        rc = otx2_mbox_process(mbox);
 971        if (rc < 0)
 972                return rc;
 973
 974        if (dev->lock_tx_ctx) {
 975                sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 976                sq->qidx = txq->sq;
 977                sq->ctype = NIX_AQ_CTYPE_SQ;
 978                sq->op = NIX_AQ_INSTOP_LOCK;
 979
 980                rc = otx2_mbox_process(mbox);
 981        }
 982
 983        return rc;
 984}
 985
 986static int
 987nix_sq_uninit(struct otx2_eth_txq *txq)
 988{
 989        struct otx2_eth_dev *dev = txq->dev;
 990        struct otx2_mbox *mbox = dev->mbox;
 991        struct ndc_sync_op *ndc_req;
 992        struct nix_aq_enq_rsp *rsp;
 993        struct nix_aq_enq_req *aq;
 994        uint16_t sqes_per_sqb;
 995        void *sqb_buf;
 996        int rc, count;
 997
 998        otx2_nix_dbg("Cleaning up sq %u", txq->sq);
 999
1000        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1001        aq->qidx = txq->sq;
1002        aq->ctype = NIX_AQ_CTYPE_SQ;
1003        aq->op = NIX_AQ_INSTOP_READ;
1004
1005        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1006        if (rc)
1007                return rc;
1008
1009        /* Check if sq is already cleaned up */
1010        if (!rsp->sq.ena)
1011                return 0;
1012
1013        /* Disable sq */
1014        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1015        aq->qidx = txq->sq;
1016        aq->ctype = NIX_AQ_CTYPE_SQ;
1017        aq->op = NIX_AQ_INSTOP_WRITE;
1018
1019        aq->sq_mask.ena = ~aq->sq_mask.ena;
1020        aq->sq.ena = 0;
1021
1022        rc = otx2_mbox_process(mbox);
1023        if (rc)
1024                return rc;
1025
1026        if (dev->lock_tx_ctx) {
1027                /* Unlock sq */
1028                aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1029                aq->qidx = txq->sq;
1030                aq->ctype = NIX_AQ_CTYPE_SQ;
1031                aq->op = NIX_AQ_INSTOP_UNLOCK;
1032
1033                rc = otx2_mbox_process(mbox);
1034                if (rc < 0)
1035                        return rc;
1036
1037                nix_sqb_unlock(txq->sqb_pool);
1038        }
1039
1040        /* Read SQ and free sqb's */
1041        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1042        aq->qidx = txq->sq;
1043        aq->ctype = NIX_AQ_CTYPE_SQ;
1044        aq->op = NIX_AQ_INSTOP_READ;
1045
1046        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1047        if (rc)
1048                return rc;
1049
1050        if (aq->sq.smq_pend)
1051                otx2_err("SQ has pending sqe's");
1052
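        /* Each SQB ends with a pointer to the next SQB in its last SQE slot,
         * so the in-use buffer list can be walked from head_sqb and every
         * buffer returned to the NPA aura, followed by the next-to-use SQB.
         */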
1053        count = aq->sq.sqb_count;
1054        sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
1055        /* Free SQB's that are used */
1056        sqb_buf = (void *)rsp->sq.head_sqb;
1057        while (count) {
1058                void *next_sqb;
1059
1060                next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
1061                                      ((sqes_per_sqb - 1) *
1062                                      nix_sq_max_sqe_sz(txq)));
1063                npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
1064                                    (uint64_t)sqb_buf);
1065                sqb_buf = next_sqb;
1066                count--;
1067        }
1068
1069        /* Free next to use sqb */
1070        if (rsp->sq.next_sqb)
1071                npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
1072                                    rsp->sq.next_sqb);
1073
1074        /* Sync NDC-NIX-TX for LF */
1075        ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
1076        ndc_req->nix_lf_tx_sync = 1;
1077        rc = otx2_mbox_process(mbox);
1078        if (rc)
1079                otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);
1080
1081        return rc;
1082}
1083
1084static int
1085nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
1086{
1087        struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
1088        struct npa_aq_enq_req *aura_req;
1089
1090        aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
1091        aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
1092        aura_req->ctype = NPA_AQ_CTYPE_AURA;
1093        aura_req->op = NPA_AQ_INSTOP_WRITE;
1094
1095        aura_req->aura.limit = nb_sqb_bufs;
1096        aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
1097
1098        return otx2_mbox_process(npa_lf->mbox);
1099}
1100
1101static int
1102nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
1103{
1104        struct otx2_eth_dev *dev = txq->dev;
1105        uint16_t sqes_per_sqb, nb_sqb_bufs;
1106        char name[RTE_MEMPOOL_NAMESIZE];
1107        struct rte_mempool_objsz sz;
1108        struct npa_aura_s *aura;
1109        uint32_t tmp, blk_sz;
1110
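        /* The aura context for the SQB pool lives in the same DMA zone as
         * the flow-control memory, OTX2_ALIGN bytes after fc_mem. An SQB of
         * sqb_size bytes holds sqb_size/8 words, i.e. sqb_size/128 W16 SQEs
         * or sqb_size/64 W8 SQEs, which gives sqes_per_sqb below.
         */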
1111        aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
1112        snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
1113        blk_sz = dev->sqb_size;
1114
1115        if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
1116                sqes_per_sqb = (dev->sqb_size / 8) / 16;
1117        else
1118                sqes_per_sqb = (dev->sqb_size / 8) / 8;
1119
1120        nb_sqb_bufs = nb_desc / sqes_per_sqb;
1121        /* Clamp up to devarg passed SQB count */
1122        nb_sqb_bufs =  RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB,
1123                              nb_sqb_bufs + NIX_SQB_LIST_SPACE));
1124
1125        txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
1126                                                 0, 0, dev->node,
1127                                                 RTE_MEMPOOL_F_NO_SPREAD);
1128        txq->nb_sqb_bufs = nb_sqb_bufs;
1129        txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
1130        txq->nb_sqb_bufs_adj = nb_sqb_bufs -
1131                RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
1132        txq->nb_sqb_bufs_adj =
1133                (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
1134
1135        if (txq->sqb_pool == NULL) {
1136                otx2_err("Failed to allocate sqe mempool");
1137                goto fail;
1138        }
1139
1140        memset(aura, 0, sizeof(*aura));
1141        aura->fc_ena = 1;
1142        aura->fc_addr = txq->fc_iova;
1143        aura->fc_hyst_bits = 0; /* Store count on all updates */
1144        if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
1145                otx2_err("Failed to set ops for sqe mempool");
1146                goto fail;
1147        }
1148        if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
1149                otx2_err("Failed to populate sqe mempool");
1150                goto fail;
1151        }
1152
1153        tmp = rte_mempool_calc_obj_size(blk_sz, RTE_MEMPOOL_F_NO_SPREAD, &sz);
1154        if (dev->sqb_size != sz.elt_size) {
1155                otx2_err("sqe pool block size is not expected %d != %d",
1156                         dev->sqb_size, tmp);
1157                goto fail;
1158        }
1159
1160        nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
1161        if (dev->lock_tx_ctx)
1162                nix_sqb_lock(txq->sqb_pool);
1163
1164        return 0;
1165fail:
1166        return -ENOMEM;
1167}
1168
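/*
 * Prebuild the constant part of the NIX send command in txq->cmd: the send
 * header, the optional extension and timestamp MEM sub-descriptors, and the
 * scatter/gather header. The Tx fast path then only patches the per-packet
 * fields (lengths, flags, buffer address) before issuing the LMT store.
 */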
1169void
1170otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
1171{
1172        struct nix_send_ext_s *send_hdr_ext;
1173        struct nix_send_hdr_s *send_hdr;
1174        struct nix_send_mem_s *send_mem;
1175        union nix_send_sg_s *sg;
1176
1177        /* Initialize the fields based on basic single segment packet */
1178        memset(&txq->cmd, 0, sizeof(txq->cmd));
1179
1180        if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
1181                send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
1182                /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
1183                send_hdr->w0.sizem1 = 2;
1184
1185                send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
1186                send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
1187                if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
1188                        /* Default: one seg packet would have:
1189                         * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
1190                         * => 8/2 - 1 = 3
1191                         */
1192                        send_hdr->w0.sizem1 = 3;
1193                        send_hdr_ext->w0.tstmp = 1;
1194
 1195                        /* The send_mem sub-descriptor starts at word
 1196                         * offset send_hdr->w0.sizem1 * 2 in the command.
 1197                         */
1198                        send_mem = (struct nix_send_mem_s *)(txq->cmd +
1199                                                (send_hdr->w0.sizem1 << 1));
1200                        send_mem->subdc = NIX_SUBDC_MEM;
1201                        send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
1202                        send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
1203                }
1204                sg = (union nix_send_sg_s *)&txq->cmd[4];
1205        } else {
1206                send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
1207                /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
1208                send_hdr->w0.sizem1 = 1;
1209                sg = (union nix_send_sg_s *)&txq->cmd[2];
1210        }
1211
1212        send_hdr->w0.sq = txq->sq;
1213        sg->subdc = NIX_SUBDC_SG;
1214        sg->segs = 1;
1215        sg->ld_type = NIX_SENDLDTYPE_LDD;
1216
1217        rte_smp_wmb();
1218}
1219
1220static void
1221otx2_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
1222{
1223        struct otx2_eth_txq *txq = eth_dev->data->tx_queues[qid];
1224
1225        if (!txq)
1226                return;
1227
1228        otx2_nix_dbg("Releasing txq %u", txq->sq);
1229
1230        /* Flush and disable tm */
1231        otx2_nix_sq_flush_pre(txq, eth_dev->data->dev_started);
1232
1233        /* Free sqb's and disable sq */
1234        nix_sq_uninit(txq);
1235
1236        if (txq->sqb_pool) {
1237                rte_mempool_free(txq->sqb_pool);
1238                txq->sqb_pool = NULL;
1239        }
1240        otx2_nix_sq_flush_post(txq);
1241        rte_free(txq);
1242        eth_dev->data->tx_queues[qid] = NULL;
1243}
1244
1245
1246static int
1247otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
1248                        uint16_t nb_desc, unsigned int socket_id,
1249                        const struct rte_eth_txconf *tx_conf)
1250{
1251        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1252        const struct rte_memzone *fc;
1253        struct otx2_eth_txq *txq;
1254        uint64_t offloads;
1255        int rc;
1256
1257        rc = -EINVAL;
1258
 1259        /* Compile-time check to make sure all fast path elements fit in a cache line */
1260        RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);
1261
1262        if (tx_conf->tx_deferred_start) {
1263                otx2_err("Tx deferred start is not supported");
1264                goto fail;
1265        }
1266
1267        /* Free memory prior to re-allocation if needed. */
1268        if (eth_dev->data->tx_queues[sq] != NULL) {
1269                otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
1270                otx2_nix_tx_queue_release(eth_dev, sq);
1271        }
1272
1273        /* Find the expected offloads for this queue */
1274        offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
1275
1276        /* Allocating tx queue data structure */
1277        txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
1278                                 OTX2_ALIGN, socket_id);
1279        if (txq == NULL) {
1280                otx2_err("Failed to alloc txq=%d", sq);
1281                rc = -ENOMEM;
1282                goto fail;
1283        }
1284        txq->sq = sq;
1285        txq->dev = dev;
1286        txq->sqb_pool = NULL;
1287        txq->offloads = offloads;
1288        dev->tx_offloads |= offloads;
1289        eth_dev->data->tx_queues[sq] = txq;
1290
1291        /*
1292         * Allocate memory for flow control updates from HW.
 1293         * Alloc one cache line, so that it fits all FC_STYPE modes.
1294         */
1295        fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
1296                                      OTX2_ALIGN + sizeof(struct npa_aura_s),
1297                                      OTX2_ALIGN, dev->node);
1298        if (fc == NULL) {
1299                otx2_err("Failed to allocate mem for fcmem");
1300                rc = -ENOMEM;
1301                goto free_txq;
1302        }
1303        txq->fc_iova = fc->iova;
1304        txq->fc_mem = fc->addr;
1305
1306        /* Initialize the aura sqb pool */
1307        rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
1308        if (rc) {
1309                otx2_err("Failed to alloc sqe pool rc=%d", rc);
1310                goto free_txq;
1311        }
1312
1313        /* Initialize the SQ */
1314        rc = nix_sq_init(txq);
1315        if (rc) {
1316                otx2_err("Failed to init sq=%d context", sq);
1317                goto free_txq;
1318        }
1319
1320        txq->fc_cache_pkts = 0;
1321        txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
1322        /* Evenly distribute LMT slot for each sq */
1323        txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));
1324
1325        txq->qconf.socket_id = socket_id;
1326        txq->qconf.nb_desc = nb_desc;
1327        memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
1328
1329        txq->lso_tun_fmt = dev->lso_tun_fmt;
1330        otx2_nix_form_default_desc(txq);
1331
1332        otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
1333                     " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
1334                     fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
1335                     txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
1336        eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
1337        return 0;
1338
1339free_txq:
1340        otx2_nix_tx_queue_release(eth_dev, sq);
1341fail:
1342        return rc;
1343}
1344
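/*
 * Snapshot every queue's configuration (descriptor count, socket id, conf
 * and mempool) into tx_qconf/rx_qconf and release the queues. This allows
 * nix_restore_queue_cfg() to recreate them with the same settings when the
 * port is reconfigured.
 */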
1345static int
1346nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
1347{
1348        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1349        struct otx2_eth_qconf *tx_qconf = NULL;
1350        struct otx2_eth_qconf *rx_qconf = NULL;
1351        struct otx2_eth_txq **txq;
1352        struct otx2_eth_rxq **rxq;
1353        int i, nb_rxq, nb_txq;
1354
1355        nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1356        nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1357
1358        tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
1359        if (tx_qconf == NULL) {
1360                otx2_err("Failed to allocate memory for tx_qconf");
1361                goto fail;
1362        }
1363
1364        rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
1365        if (rx_qconf == NULL) {
1366                otx2_err("Failed to allocate memory for rx_qconf");
1367                goto fail;
1368        }
1369
1370        txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1371        for (i = 0; i < nb_txq; i++) {
1372                if (txq[i] == NULL) {
1373                        tx_qconf[i].valid = false;
1374                        otx2_info("txq[%d] is already released", i);
1375                        continue;
1376                }
1377                memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
1378                tx_qconf[i].valid = true;
1379                otx2_nix_tx_queue_release(eth_dev, i);
1380        }
1381
1382        rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1383        for (i = 0; i < nb_rxq; i++) {
1384                if (rxq[i] == NULL) {
1385                        rx_qconf[i].valid = false;
1386                        otx2_info("rxq[%d] is already released", i);
1387                        continue;
1388                }
1389                memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
1390                rx_qconf[i].valid = true;
1391                otx2_nix_rx_queue_release(eth_dev, i);
1392        }
1393
1394        dev->tx_qconf = tx_qconf;
1395        dev->rx_qconf = rx_qconf;
1396        return 0;
1397
1398fail:
1399        free(tx_qconf);
1400        free(rx_qconf);
1401
1402        return -ENOMEM;
1403}
1404
1405static int
1406nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
1407{
1408        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1409        struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
1410        struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
1411        int rc, i, nb_rxq, nb_txq;
1412
1413        nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1414        nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1415
1416        rc = -ENOMEM;
 1417        /* Setup tx & rx queues with the previous configuration so
 1418         * that the queues can be functional in cases where ports
 1419         * are started without reconfiguring queues.
1420         *
 1421         * The usual reconfiguration sequence is as below:
1422         * port_configure() {
1423         *      if(reconfigure) {
1424         *              queue_release()
1425         *              queue_setup()
1426         *      }
1427         *      queue_configure() {
1428         *              queue_release()
1429         *              queue_setup()
1430         *      }
1431         * }
1432         * port_start()
1433         *
 1434         * In some applications' control paths, queue_configure() would
 1435         * NOT be invoked for TXQs/RXQs in port_configure().
 1436         * In such cases, queues can be functional after start as the
 1437         * queues are already set up in port_configure().
1438         */
1439        for (i = 0; i < nb_txq; i++) {
1440                if (!tx_qconf[i].valid)
1441                        continue;
1442                rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
1443                                             tx_qconf[i].socket_id,
1444                                             &tx_qconf[i].conf.tx);
1445                if (rc) {
1446                        otx2_err("Failed to setup tx queue rc=%d", rc);
1447                        for (i -= 1; i >= 0; i--)
1448                                otx2_nix_tx_queue_release(eth_dev, i);
1449                        goto fail;
1450                }
1451        }
1452
1453        free(tx_qconf); tx_qconf = NULL;
1454
1455        for (i = 0; i < nb_rxq; i++) {
1456                if (!rx_qconf[i].valid)
1457                        continue;
1458                rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
1459                                             rx_qconf[i].socket_id,
1460                                             &rx_qconf[i].conf.rx,
1461                                             rx_qconf[i].mempool);
1462                if (rc) {
1463                        otx2_err("Failed to setup rx queue rc=%d", rc);
1464                        for (i -= 1; i >= 0; i--)
1465                                otx2_nix_rx_queue_release(eth_dev, i);
1466                        goto release_tx_queues;
1467                }
1468        }
1469
1470        free(rx_qconf); rx_qconf = NULL;
1471
1472        return 0;
1473
1474release_tx_queues:
1475        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1476                otx2_nix_tx_queue_release(eth_dev, i);
1477fail:
1478        if (tx_qconf)
1479                free(tx_qconf);
1480        if (rx_qconf)
1481                free(rx_qconf);
1482
1483        return rc;
1484}
1485
1486static uint16_t
1487nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
1488{
1489        RTE_SET_USED(queue);
1490        RTE_SET_USED(mbufs);
1491        RTE_SET_USED(pkts);
1492
1493        return 0;
1494}
1495
1496static void
1497nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
1498{
 1499        /* These dummy functions are required to support
 1500         * applications which reconfigure queues without
 1501         * stopping the tx/rx burst threads (e.g. the KNI app).
 1502         * When the queue context is saved, txqs/rxqs are released,
 1503         * which would crash the application since rx/tx burst
 1504         * is still running on different lcores.
 1505         */
1506        eth_dev->tx_pkt_burst = nix_eth_nop_burst;
1507        eth_dev->rx_pkt_burst = nix_eth_nop_burst;
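            /* Full memory barrier so that other lcores observe the nop
             * burst handlers before the caller goes on to release the
             * queues they might still be polling.
             */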
1508        rte_mb();
1509}
1510
1511static void
1512nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
1513{
1514        volatile struct nix_lso_format *field;
1515
1516        /* Format works only with TCP packet marked by OL3/OL4 */
1517        field = (volatile struct nix_lso_format *)&req->fields[0];
1518        req->field_mask = NIX_LSO_FIELD_MASK;
1519        /* Outer IPv4/IPv6 */
1520        field->layer = NIX_TXLAYER_OL3;
1521        field->offset = v4 ? 2 : 4;
1522        field->sizem1 = 1; /* 2B */
1523        field->alg = NIX_LSOALG_ADD_PAYLEN;
1524        field++;
1525        if (v4) {
1526                /* IPID field */
1527                field->layer = NIX_TXLAYER_OL3;
1528                field->offset = 4;
1529                field->sizem1 = 1;
1530                /* Incremented linearly per segment */
1531                field->alg = NIX_LSOALG_ADD_SEGNUM;
1532                field++;
1533        }
1534
1535        /* TCP sequence number update */
1536        field->layer = NIX_TXLAYER_OL4;
1537        field->offset = 4;
1538        field->sizem1 = 3; /* 4 bytes */
1539        field->alg = NIX_LSOALG_ADD_OFFSET;
1540        field++;
1541        /* TCP flags field */
1542        field->layer = NIX_TXLAYER_OL4;
1543        field->offset = 12;
1544        field->sizem1 = 1;
1545        field->alg = NIX_LSOALG_TCP_FLAGS;
1546        field++;
1547}
1548
1549static void
1550nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
1551                    bool outer_v4, bool inner_v4)
1552{
1553        volatile struct nix_lso_format *field;
1554
1555        field = (volatile struct nix_lso_format *)&req->fields[0];
1556        req->field_mask = NIX_LSO_FIELD_MASK;
1557        /* Outer IPv4/IPv6 len */
1558        field->layer = NIX_TXLAYER_OL3;
1559        field->offset = outer_v4 ? 2 : 4;
1560        field->sizem1 = 1; /* 2B */
1561        field->alg = NIX_LSOALG_ADD_PAYLEN;
1562        field++;
1563        if (outer_v4) {
1564                /* IPID */
1565                field->layer = NIX_TXLAYER_OL3;
1566                field->offset = 4;
1567                field->sizem1 = 1;
1568                /* Incremented linearly per segment */
1569                field->alg = NIX_LSOALG_ADD_SEGNUM;
1570                field++;
1571        }
1572
1573        /* Outer UDP length */
1574        field->layer = NIX_TXLAYER_OL4;
1575        field->offset = 4;
1576        field->sizem1 = 1;
1577        field->alg = NIX_LSOALG_ADD_PAYLEN;
1578        field++;
1579
1580        /* Inner IPv4/IPv6 */
1581        field->layer = NIX_TXLAYER_IL3;
1582        field->offset = inner_v4 ? 2 : 4;
1583        field->sizem1 = 1; /* 2B */
1584        field->alg = NIX_LSOALG_ADD_PAYLEN;
1585        field++;
1586        if (inner_v4) {
1587                /* IPID field */
1588                field->layer = NIX_TXLAYER_IL3;
1589                field->offset = 4;
1590                field->sizem1 = 1;
1591                /* Incremented linearly per segment */
1592                field->alg = NIX_LSOALG_ADD_SEGNUM;
1593                field++;
1594        }
1595
1596        /* TCP sequence number update */
1597        field->layer = NIX_TXLAYER_IL4;
1598        field->offset = 4;
1599        field->sizem1 = 3; /* 4 bytes */
1600        field->alg = NIX_LSOALG_ADD_OFFSET;
1601        field++;
1602
1603        /* TCP flags field */
1604        field->layer = NIX_TXLAYER_IL4;
1605        field->offset = 12;
1606        field->sizem1 = 1;
1607        field->alg = NIX_LSOALG_TCP_FLAGS;
1608        field++;
1609}
1610
1611static void
1612nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
1613                bool outer_v4, bool inner_v4)
1614{
1615        volatile struct nix_lso_format *field;
1616
1617        field = (volatile struct nix_lso_format *)&req->fields[0];
1618        req->field_mask = NIX_LSO_FIELD_MASK;
1619        /* Outer IPv4/IPv6 len */
1620        field->layer = NIX_TXLAYER_OL3;
1621        field->offset = outer_v4 ? 2 : 4;
1622        field->sizem1 = 1; /* 2B */
1623        field->alg = NIX_LSOALG_ADD_PAYLEN;
1624        field++;
1625        if (outer_v4) {
1626                /* IPID */
1627                field->layer = NIX_TXLAYER_OL3;
1628                field->offset = 4;
1629                field->sizem1 = 1;
1630                /* Incremented linearly per segment */
1631                field->alg = NIX_LSOALG_ADD_SEGNUM;
1632                field++;
1633        }
1634
1635        /* Inner IPv4/IPv6 */
1636        field->layer = NIX_TXLAYER_IL3;
1637        field->offset = inner_v4 ? 2 : 4;
1638        field->sizem1 = 1; /* 2B */
1639        field->alg = NIX_LSOALG_ADD_PAYLEN;
1640        field++;
1641        if (inner_v4) {
1642                /* IPID field */
1643                field->layer = NIX_TXLAYER_IL3;
1644                field->offset = 4;
1645                field->sizem1 = 1;
1646                /* Incremented linearly per segment */
1647                field->alg = NIX_LSOALG_ADD_SEGNUM;
1648                field++;
1649        }
1650
1651        /* TCP sequence number update */
1652        field->layer = NIX_TXLAYER_IL4;
1653        field->offset = 4;
1654        field->sizem1 = 3; /* 4 bytes */
1655        field->alg = NIX_LSOALG_ADD_OFFSET;
1656        field++;
1657
1658        /* TCP flags field */
1659        field->layer = NIX_TXLAYER_IL4;
1660        field->offset = 12;
1661        field->sizem1 = 1;
1662        field->alg = NIX_LSOALG_TCP_FLAGS;
1663        field++;
1664}
1665
1666static int
1667nix_setup_lso_formats(struct otx2_eth_dev *dev)
1668{
1669        struct otx2_mbox *mbox = dev->mbox;
1670        struct nix_lso_format_cfg_rsp *rsp;
1671        struct nix_lso_format_cfg *req;
1672        uint8_t *fmt;
1673        int rc;
1674
1675        /* Skip if TSO was not requested */
1676        if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
1677                return 0;
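
            /* The plain TCPv4/v6 formats must land at the fixed indices
             * NIX_LSO_FORMAT_IDX_TSOV4/TSOV6 used by the Tx path (hence the
             * -EFAULT checks below); the tunnel format indices are saved and
             * packed into dev->lso_tun_fmt at the end for fast path use.
             */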
1678        /*
1679         * IPv4/TCP LSO
1680         */
1681        req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1682        nix_lso_tcp(req, true);
1683        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1684        if (rc)
1685                return rc;
1686
1687        if (rsp->lso_format_idx != NIX_LSO_FORMAT_IDX_TSOV4)
1688                return -EFAULT;
1689        otx2_nix_dbg("tcpv4 lso fmt=%u", rsp->lso_format_idx);
1690
1692        /*
1693         * IPv6/TCP LSO
1694         */
1695        req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1696        nix_lso_tcp(req, false);
1697        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1698        if (rc)
1699                return rc;
1700
1701        if (rsp->lso_format_idx != NIX_LSO_FORMAT_IDX_TSOV6)
1702                return -EFAULT;
1703        otx2_nix_dbg("tcpv6 lso fmt=%u\n", rsp->lso_format_idx);
1704
1705        /*
1706         * IPv4/UDP/TUN HDR/IPv4/TCP LSO
1707         */
1708        req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1709        nix_lso_udp_tun_tcp(req, true, true);
1710        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1711        if (rc)
1712                return rc;
1713
1714        dev->lso_udp_tun_idx[NIX_LSO_TUN_V4V4] = rsp->lso_format_idx;
1715        otx2_nix_dbg("udp tun v4v4 fmt=%u\n", rsp->lso_format_idx);
1716
1717        /*
1718         * IPv4/UDP/TUN HDR/IPv6/TCP LSO
1719         */
1720        req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1721        nix_lso_udp_tun_tcp(req, true, false);
1722        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1723        if (rc)
1724                return rc;
1725
1726        dev->lso_udp_tun_idx[NIX_LSO_TUN_V4V6] = rsp->lso_format_idx;
1727        otx2_nix_dbg("udp tun v4v6 fmt=%u\n", rsp->lso_format_idx);
1728
1729        /*
1730         * IPv6/UDP/TUN HDR/IPv4/TCP LSO
1731         */
1732        req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1733        nix_lso_udp_tun_tcp(req, false, true);
1734        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1735        if (rc)
1736                return rc;
1737
1738        dev->lso_udp_tun_idx[NIX_LSO_TUN_V6V4] = rsp->lso_format_idx;
1739        otx2_nix_dbg("udp tun v6v4 fmt=%u\n", rsp->lso_format_idx);
1740
1741        /*
1742         * IPv6/UDP/TUN HDR/IPv6/TCP LSO
1743         */
1744        req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1745        nix_lso_udp_tun_tcp(req, false, false);
1746        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1747        if (rc)
1748                return rc;
1749
1750        dev->lso_udp_tun_idx[NIX_LSO_TUN_V6V6] = rsp->lso_format_idx;
1751        otx2_nix_dbg("udp tun v6v6 fmt=%u\n", rsp->lso_format_idx);
1752
1753        /*
1754         * IPv4/TUN HDR/IPv4/TCP LSO
1755         */
1756        req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1757        nix_lso_tun_tcp(req, true, true);
1758        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1759        if (rc)
1760                return rc;
1761
1762        dev->lso_tun_idx[NIX_LSO_TUN_V4V4] = rsp->lso_format_idx;
1763        otx2_nix_dbg("tun v4v4 fmt=%u\n", rsp->lso_format_idx);
1764
1765        /*
1766         * IPv4/TUN HDR/IPv6/TCP LSO
1767         */
1768        req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1769        nix_lso_tun_tcp(req, true, false);
1770        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1771        if (rc)
1772                return rc;
1773
1774        dev->lso_tun_idx[NIX_LSO_TUN_V4V6] = rsp->lso_format_idx;
1775        otx2_nix_dbg("tun v4v6 fmt=%u\n", rsp->lso_format_idx);
1776
1777        /*
1778         * IPv6/TUN HDR/IPv4/TCP LSO
1779         */
1780        req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1781        nix_lso_tun_tcp(req, false, true);
1782        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1783        if (rc)
1784                return rc;
1785
1786        dev->lso_tun_idx[NIX_LSO_TUN_V6V4] = rsp->lso_format_idx;
1787        otx2_nix_dbg("tun v6v4 fmt=%u\n", rsp->lso_format_idx);
1788
1789        /*
1790         * IPv6/TUN HDR/IPv6/TCP LSO
1791         */
1792        req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1793        nix_lso_tun_tcp(req, false, false);
1794        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1795        if (rc)
1796                return rc;
1797
1798        dev->lso_tun_idx[NIX_LSO_TUN_V6V6] = rsp->lso_format_idx;
1799        otx2_nix_dbg("tun v6v6 fmt=%u\n", rsp->lso_format_idx);
1800
1801        /* Save all tunnel formats into a u64 for the fast path.
1802         * The lower 32 bits hold the non-UDP tunnel formats and the
1803         * upper 32 bits hold the UDP tunnel formats.
1804         */
1805        fmt = dev->lso_tun_idx;
1806        dev->lso_tun_fmt = ((uint64_t)fmt[NIX_LSO_TUN_V4V4] |
1807                            (uint64_t)fmt[NIX_LSO_TUN_V4V6] << 8 |
1808                            (uint64_t)fmt[NIX_LSO_TUN_V6V4] << 16 |
1809                            (uint64_t)fmt[NIX_LSO_TUN_V6V6] << 24);
1810
1811        fmt = dev->lso_udp_tun_idx;
1812        dev->lso_tun_fmt |= ((uint64_t)fmt[NIX_LSO_TUN_V4V4] << 32 |
1813                             (uint64_t)fmt[NIX_LSO_TUN_V4V6] << 40 |
1814                             (uint64_t)fmt[NIX_LSO_TUN_V6V4] << 48 |
1815                             (uint64_t)fmt[NIX_LSO_TUN_V6V6] << 56);
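            /* Resulting per-byte layout of dev->lso_tun_fmt (from the
             * shifts above):
             *   byte 0: tun V4V4    byte 4: udp tun V4V4
             *   byte 1: tun V4V6    byte 5: udp tun V4V6
             *   byte 2: tun V6V4    byte 6: udp tun V6V4
             *   byte 3: tun V6V6    byte 7: udp tun V6V6
             */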
1816
1817        return 0;
1818}
1819
1820static int
1821otx2_nix_configure(struct rte_eth_dev *eth_dev)
1822{
1823        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1824        struct rte_eth_dev_data *data = eth_dev->data;
1825        struct rte_eth_conf *conf = &data->dev_conf;
1826        struct rte_eth_rxmode *rxmode = &conf->rxmode;
1827        struct rte_eth_txmode *txmode = &conf->txmode;
1828        char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1829        struct rte_ether_addr *ea;
1830        uint8_t nb_rxq, nb_txq;
1831        int rc;
1832
1833        rc = -EINVAL;
1834
1835        /* Sanity checks */
1836        if (rte_eal_has_hugepages() == 0) {
1837                otx2_err("Huge page is not configured");
1838                goto fail_configure;
1839        }
1840
1841        if (conf->dcb_capability_en == 1) {
1842                otx2_err("dcb enable is not supported");
1843                goto fail_configure;
1844        }
1845
1846        if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1847                otx2_err("Flow director is not supported");
1848                goto fail_configure;
1849        }
1850
1851        if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
1852            rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
1853                otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1854                goto fail_configure;
1855        }
1856
1857        if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
1858                otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
1859                goto fail_configure;
1860        }
1861
1862        if (otx2_dev_is_Ax(dev) &&
1863            (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
1864            ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
1865            (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
1866                otx2_err("Outer IP and SCTP checksum unsupported");
1867                goto fail_configure;
1868        }
1869
1870        /* Free the resources allocated from the previous configure */
1871        if (dev->configured == 1) {
1872                otx2_eth_sec_fini(eth_dev);
1873                otx2_nix_rxchan_bpid_cfg(eth_dev, false);
1874                otx2_nix_vlan_fini(eth_dev);
1875                otx2_nix_mc_addr_list_uninstall(eth_dev);
1876                otx2_flow_free_all_resources(dev);
1877                oxt2_nix_unregister_queue_irqs(eth_dev);
1878                if (eth_dev->data->dev_conf.intr_conf.rxq)
1879                        oxt2_nix_unregister_cq_irqs(eth_dev);
1880                nix_set_nop_rxtx_function(eth_dev);
1881                rc = nix_store_queue_cfg_and_then_release(eth_dev);
1882                if (rc)
1883                        goto fail_configure;
1884                otx2_nix_tm_fini(eth_dev);
1885                nix_lf_free(dev);
1886        }
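
            /* The per-queue config saved by
             * nix_store_queue_cfg_and_then_release() above is restored by
             * nix_restore_queue_cfg() near the end of this function, after
             * the NIX LF has been re-allocated.
             */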
1887
1888        dev->rx_offloads = rxmode->offloads;
1889        dev->tx_offloads = txmode->offloads;
1890        dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
1891        dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
1892        dev->rss_info.rss_grps = NIX_RSS_GRPS;
1893
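            /* Size the LF with at least one RQ/SQ/CQ so that the NIX LF is
             * always allocated with a non-zero queue count, even if the
             * application has not configured any queues at this point.
             */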
1894        nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1895        nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1896
1897        /* Alloc a nix lf */
1898        rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
1899        if (rc) {
1900                otx2_err("Failed to init nix_lf rc=%d", rc);
1901                goto fail_offloads;
1902        }
1903
1904        otx2_nix_err_intr_enb_dis(eth_dev, true);
1905        otx2_nix_ras_intr_enb_dis(eth_dev, true);
1906
1907        if (dev->ptp_en &&
1908            dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
1909                otx2_err("Both PTP and switch header enabled");
1910                goto free_nix_lf;
1911        }
1912
1913        rc = nix_lf_switch_header_type_enable(dev, true);
1914        if (rc) {
1915                otx2_err("Failed to enable switch type nix_lf rc=%d", rc);
1916                goto free_nix_lf;
1917        }
1918
1919        rc = nix_setup_lso_formats(dev);
1920        if (rc) {
1921                otx2_err("failed to setup nix lso format fields, rc=%d", rc);
1922                goto free_nix_lf;
1923        }
1924
1925        /* Configure RSS */
1926        rc = otx2_nix_rss_config(eth_dev);
1927        if (rc) {
1928                otx2_err("Failed to configure rss rc=%d", rc);
1929                goto free_nix_lf;
1930        }
1931
1932        /* Init the default TM scheduler hierarchy */
1933        rc = otx2_nix_tm_init_default(eth_dev);
1934        if (rc) {
1935                otx2_err("Failed to init traffic manager rc=%d", rc);
1936                goto free_nix_lf;
1937        }
1938
1939        rc = otx2_nix_vlan_offload_init(eth_dev);
1940        if (rc) {
1941                otx2_err("Failed to init vlan offload rc=%d", rc);
1942                goto tm_fini;
1943        }
1944
1945        /* Register queue IRQs */
1946        rc = oxt2_nix_register_queue_irqs(eth_dev);
1947        if (rc) {
1948                otx2_err("Failed to register queue interrupts rc=%d", rc);
1949                goto vlan_fini;
1950        }
1951
1952        /* Register cq IRQs */
1953        if (eth_dev->data->dev_conf.intr_conf.rxq) {
1954                if (eth_dev->data->nb_rx_queues > dev->cints) {
1955                        otx2_err("Rx interrupt cannot be enabled, rxq > %d",
1956                                 dev->cints);
1957                        goto q_irq_fini;
1958                }
1959                /* Rx interrupt feature cannot work with vector mode
1960                 * because vector mode does not process packets until at
1961                 * least 4 packets are received, while CQ interrupts are
1962                 * generated even for a single packet in the CQ.
1963                 */
1964                dev->scalar_ena = true;
1965
1966                rc = oxt2_nix_register_cq_irqs(eth_dev);
1967                if (rc) {
1968                        otx2_err("Failed to register CQ interrupts rc=%d", rc);
1969                        goto q_irq_fini;
1970                }
1971        }
1972
1973        /* Configure loop back mode */
1974        rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
1975        if (rc) {
1976                otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
1977                goto cq_fini;
1978        }
1979
1980        rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
1981        if (rc) {
1982                otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
1983                goto cq_fini;
1984        }
1985
1986        /* Enable security */
1987        rc = otx2_eth_sec_init(eth_dev);
1988        if (rc)
1989                goto cq_fini;
1990
1991        rc = otx2_nix_flow_ctrl_init(eth_dev);
1992        if (rc) {
1993                otx2_err("Failed to init flow ctrl mode %d", rc);
1994                goto cq_fini;
1995        }
1996
1997        rc = otx2_nix_mc_addr_list_install(eth_dev);
1998        if (rc < 0) {
1999                otx2_err("Failed to install mc address list rc=%d", rc);
2000                goto sec_fini;
2001        }
2002
2003        /*
2004         * Restore the saved queue configuration when this is a reconfigure
2005         * and the application has not invoked queue setup again.
2006         */
2007        if (dev->configured == 1) {
2008                rc = nix_restore_queue_cfg(eth_dev);
2009                if (rc)
2010                        goto uninstall_mc_list;
2011        }
2012
2013        /* Update the mac address */
2014        ea = eth_dev->data->mac_addrs;
2015        memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
2016        if (rte_is_zero_ether_addr(ea))
2017                rte_eth_random_addr((uint8_t *)ea);
2018
2019        rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
2020
2021        /* Apply new link configurations if changed */
2022        rc = otx2_apply_link_speed(eth_dev);
2023        if (rc) {
2024                otx2_err("Failed to set link configuration");
2025                goto uninstall_mc_list;
2026        }
2027
2028        otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
2029                " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
2030                " rx_flags=0x%x tx_flags=0x%x",
2031                eth_dev->data->port_id, ea_fmt, nb_rxq,
2032                nb_txq, dev->rx_offloads, dev->tx_offloads,
2033                dev->rx_offload_flags, dev->tx_offload_flags);
2034
2035        /* All good */
2036        dev->configured = 1;
2037        dev->configured_nb_rx_qs = data->nb_rx_queues;
2038        dev->configured_nb_tx_qs = data->nb_tx_queues;
2039        return 0;
2040
2041uninstall_mc_list:
2042        otx2_nix_mc_addr_list_uninstall(eth_dev);
2043sec_fini:
2044        otx2_eth_sec_fini(eth_dev);
2045cq_fini:
2046        oxt2_nix_unregister_cq_irqs(eth_dev);
2047q_irq_fini:
2048        oxt2_nix_unregister_queue_irqs(eth_dev);
2049vlan_fini:
2050        otx2_nix_vlan_fini(eth_dev);
2051tm_fini:
2052        otx2_nix_tm_fini(eth_dev);
2053free_nix_lf:
2054        nix_lf_free(dev);
2055fail_offloads:
2056        dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
2057        dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
2058fail_configure:
2059        dev->configured = 0;
2060        return rc;
2061}
2062
2063int
2064otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
2065{
2066        struct rte_eth_dev_data *data = eth_dev->data;
2067        struct otx2_eth_txq *txq;
2068        int rc = -EINVAL;
2069
2070        txq = eth_dev->data->tx_queues[qidx];
2071
2072        if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
2073                return 0;
2074
2075        rc = otx2_nix_sq_sqb_aura_fc(txq, true);
2076        if (rc) {
2077                otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d",
2078                         qidx, rc);
2079                goto done;
2080        }
2081
2082        data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
2083
2084done:
2085        return rc;
2086}
2087
2088int
2089otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
2090{
2091        struct rte_eth_dev_data *data = eth_dev->data;
2092        struct otx2_eth_txq *txq;
2093        int rc;
2094
2095        txq = eth_dev->data->tx_queues[qidx];
2096
2097        if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
2098                return 0;
2099
2100        txq->fc_cache_pkts = 0;
2101
2102        rc = otx2_nix_sq_sqb_aura_fc(txq, false);
2103        if (rc) {
2104                otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d",
2105                         qidx, rc);
2106                goto done;
2107        }
2108
2109        data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
2110
2111done:
2112        return rc;
2113}
2114
2115static int
2116otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
2117{
2118        struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
2119        struct rte_eth_dev_data *data = eth_dev->data;
2120        int rc;
2121
2122        if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
2123                return 0;
2124
2125        rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true);
2126        if (rc) {
2127                otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc);
2128                goto done;
2129        }
2130
2131        data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
2132
2133done:
2134        return rc;
2135}
2136
2137static int
2138otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
2139{
2140        struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
2141        struct rte_eth_dev_data *data = eth_dev->data;
2142        int rc;
2143
2144        if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
2145                return 0;
2146
2147        rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false);
2148        if (rc) {
2149                otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc);
2150                goto done;
2151        }
2152
2153        data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
2154
2155done:
2156        return rc;
2157}
2158
2159static int
2160otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
2161{
2162        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2163        struct rte_mbuf *rx_pkts[32];
2164        struct otx2_eth_rxq *rxq;
2165        struct rte_eth_link link;
2166        int count, i, j, rc;
2167
2168        nix_lf_switch_header_type_enable(dev, false);
2169        nix_cgx_stop_link_event(dev);
2170        npc_rx_disable(dev);
2171
2172        /* Stop rx queues and free up pkts pending */
2173        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
2174                rc = otx2_nix_rx_queue_stop(eth_dev, i);
2175                if (rc)
2176                        continue;
2177
2178                rxq = eth_dev->data->rx_queues[i];
2179                count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
2180                while (count) {
2181                        for (j = 0; j < count; j++)
2182                                rte_pktmbuf_free(rx_pkts[j]);
2183                        count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
2184                }
2185        }
2186
2187        /* Stop tx queues  */
2188        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
2189                otx2_nix_tx_queue_stop(eth_dev, i);
2190
2191        /* Bring down link status internally */
2192        memset(&link, 0, sizeof(link));
2193        rte_eth_linkstatus_set(eth_dev, &link);
2194
2195        return 0;
2196}
2197
2198static int
2199otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
2200{
2201        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2202        int rc, i;
2203
2204        /* Avoid recalculating the MTU here if PTP is enabled by the PF,
2205         * as otx2_nix_recalc_mtu() is invoked from the
2206         * otx2_nix_ptp_enable_vf() call below.
2207         */
2208        if (eth_dev->data->nb_rx_queues != 0 && !otx2_ethdev_is_ptp_en(dev)) {
2209                rc = otx2_nix_recalc_mtu(eth_dev);
2210                if (rc)
2211                        return rc;
2212        }
2213
2214        /* Start rx queues */
2215        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
2216                rc = otx2_nix_rx_queue_start(eth_dev, i);
2217                if (rc)
2218                        return rc;
2219        }
2220
2221        /* Start tx queues  */
2222        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2223                rc = otx2_nix_tx_queue_start(eth_dev, i);
2224                if (rc)
2225                        return rc;
2226        }
2227
2228        rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
2229        if (rc) {
2230                otx2_err("Failed to update flow ctrl mode %d", rc);
2231                return rc;
2232        }
2233
2234        /* Enable PTP if it was requested by the app or if it is already
2235         * enabled in PF owning this VF
2236         */
2237        memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
2238        if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
2239            otx2_ethdev_is_ptp_en(dev))
2240                otx2_nix_timesync_enable(eth_dev);
2241        else
2242                otx2_nix_timesync_disable(eth_dev);
2243
2244        /* Tell the VF that the Rx data offset is shifted by 8 bytes when
2245         * PTP is already enabled in the PF owning this VF.
2246         */
2247        if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev))
2248                otx2_nix_ptp_enable_vf(eth_dev);
2249
2250        if (dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F) {
2251                rc = rte_mbuf_dyn_rx_timestamp_register(
2252                                &dev->tstamp.tstamp_dynfield_offset,
2253                                &dev->tstamp.rx_tstamp_dynflag);
2254                if (rc != 0) {
2255                        otx2_err("Failed to register Rx timestamp field/flag");
2256                        return -rte_errno;
2257                }
2258        }
2259
2260        rc = npc_rx_enable(dev);
2261        if (rc) {
2262                otx2_err("Failed to enable NPC rx %d", rc);
2263                return rc;
2264        }
2265
2266        otx2_nix_toggle_flag_link_cfg(dev, true);
2267
2268        rc = nix_cgx_start_link_event(dev);
2269        if (rc) {
2270                otx2_err("Failed to start cgx link event %d", rc);
2271                goto rx_disable;
2272        }
2273
2274        otx2_nix_toggle_flag_link_cfg(dev, false);
2275        otx2_eth_set_tx_function(eth_dev);
2276        otx2_eth_set_rx_function(eth_dev);
2277
2278        return 0;
2279
2280rx_disable:
2281        npc_rx_disable(dev);
2282        otx2_nix_toggle_flag_link_cfg(dev, false);
2283        return rc;
2284}
2285
2286static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
2287static int otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
2288
2289/* Initialize and register driver with DPDK Application */
2290static const struct eth_dev_ops otx2_eth_dev_ops = {
2291        .dev_infos_get            = otx2_nix_info_get,
2292        .dev_configure            = otx2_nix_configure,
2293        .link_update              = otx2_nix_link_update,
2294        .tx_queue_setup           = otx2_nix_tx_queue_setup,
2295        .tx_queue_release         = otx2_nix_tx_queue_release,
2296        .tm_ops_get               = otx2_nix_tm_ops_get,
2297        .rx_queue_setup           = otx2_nix_rx_queue_setup,
2298        .rx_queue_release         = otx2_nix_rx_queue_release,
2299        .dev_start                = otx2_nix_dev_start,
2300        .dev_stop                 = otx2_nix_dev_stop,
2301        .dev_close                = otx2_nix_dev_close,
2302        .tx_queue_start           = otx2_nix_tx_queue_start,
2303        .tx_queue_stop            = otx2_nix_tx_queue_stop,
2304        .rx_queue_start           = otx2_nix_rx_queue_start,
2305        .rx_queue_stop            = otx2_nix_rx_queue_stop,
2306        .dev_set_link_up          = otx2_nix_dev_set_link_up,
2307        .dev_set_link_down        = otx2_nix_dev_set_link_down,
2308        .dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
2309        .dev_ptypes_set           = otx2_nix_ptypes_set,
2310        .dev_reset                = otx2_nix_dev_reset,
2311        .stats_get                = otx2_nix_dev_stats_get,
2312        .stats_reset              = otx2_nix_dev_stats_reset,
2313        .get_reg                  = otx2_nix_dev_get_reg,
2314        .mtu_set                  = otx2_nix_mtu_set,
2315        .mac_addr_add             = otx2_nix_mac_addr_add,
2316        .mac_addr_remove          = otx2_nix_mac_addr_del,
2317        .mac_addr_set             = otx2_nix_mac_addr_set,
2318        .set_mc_addr_list         = otx2_nix_set_mc_addr_list,
2319        .promiscuous_enable       = otx2_nix_promisc_enable,
2320        .promiscuous_disable      = otx2_nix_promisc_disable,
2321        .allmulticast_enable      = otx2_nix_allmulticast_enable,
2322        .allmulticast_disable     = otx2_nix_allmulticast_disable,
2323        .queue_stats_mapping_set  = otx2_nix_queue_stats_mapping,
2324        .reta_update              = otx2_nix_dev_reta_update,
2325        .reta_query               = otx2_nix_dev_reta_query,
2326        .rss_hash_update          = otx2_nix_rss_hash_update,
2327        .rss_hash_conf_get        = otx2_nix_rss_hash_conf_get,
2328        .xstats_get               = otx2_nix_xstats_get,
2329        .xstats_get_names         = otx2_nix_xstats_get_names,
2330        .xstats_reset             = otx2_nix_xstats_reset,
2331        .xstats_get_by_id         = otx2_nix_xstats_get_by_id,
2332        .xstats_get_names_by_id   = otx2_nix_xstats_get_names_by_id,
2333        .rxq_info_get             = otx2_nix_rxq_info_get,
2334        .txq_info_get             = otx2_nix_txq_info_get,
2335        .rx_burst_mode_get        = otx2_rx_burst_mode_get,
2336        .tx_burst_mode_get        = otx2_tx_burst_mode_get,
2337        .tx_done_cleanup          = otx2_nix_tx_done_cleanup,
2338        .set_queue_rate_limit     = otx2_nix_tm_set_queue_rate_limit,
2339        .pool_ops_supported       = otx2_nix_pool_ops_supported,
2340        .flow_ops_get             = otx2_nix_dev_flow_ops_get,
2341        .get_module_info          = otx2_nix_get_module_info,
2342        .get_module_eeprom        = otx2_nix_get_module_eeprom,
2343        .fw_version_get           = otx2_nix_fw_version_get,
2344        .flow_ctrl_get            = otx2_nix_flow_ctrl_get,
2345        .flow_ctrl_set            = otx2_nix_flow_ctrl_set,
2346        .timesync_enable          = otx2_nix_timesync_enable,
2347        .timesync_disable         = otx2_nix_timesync_disable,
2348        .timesync_read_rx_timestamp = otx2_nix_timesync_read_rx_timestamp,
2349        .timesync_read_tx_timestamp = otx2_nix_timesync_read_tx_timestamp,
2350        .timesync_adjust_time     = otx2_nix_timesync_adjust_time,
2351        .timesync_read_time       = otx2_nix_timesync_read_time,
2352        .timesync_write_time      = otx2_nix_timesync_write_time,
2353        .vlan_offload_set         = otx2_nix_vlan_offload_set,
2354        .vlan_filter_set          = otx2_nix_vlan_filter_set,
2355        .vlan_strip_queue_set     = otx2_nix_vlan_strip_queue_set,
2356        .vlan_tpid_set            = otx2_nix_vlan_tpid_set,
2357        .vlan_pvid_set            = otx2_nix_vlan_pvid_set,
2358        .rx_queue_intr_enable     = otx2_nix_rx_queue_intr_enable,
2359        .rx_queue_intr_disable    = otx2_nix_rx_queue_intr_disable,
2360        .read_clock               = otx2_nix_read_clock,
2361};
2362
2363static inline int
2364nix_lf_attach(struct otx2_eth_dev *dev)
2365{
2366        struct otx2_mbox *mbox = dev->mbox;
2367        struct rsrc_attach_req *req;
2368
2369        /* Attach NIX(lf) */
2370        req = otx2_mbox_alloc_msg_attach_resources(mbox);
2371        req->modify = true;
2372        req->nixlf = true;
2373
2374        return otx2_mbox_process(mbox);
2375}
2376
2377static inline int
2378nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
2379{
2380        struct otx2_mbox *mbox = dev->mbox;
2381        struct msix_offset_rsp *msix_rsp;
2382        int rc;
2383
2384        /* Get NPA and NIX MSIX vector offsets */
2385        otx2_mbox_alloc_msg_msix_offset(mbox);
2386
2387        rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
2388
2389        dev->nix_msixoff = msix_rsp->nix_msixoff;
2390
2391        return rc;
2392}
2393
2394static inline int
2395otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
2396{
2397        struct rsrc_detach_req *req;
2398
2399        req = otx2_mbox_alloc_msg_detach_resources(mbox);
2400
2401        /* Detach all except npa lf */
2402        req->partial = true;
2403        req->nixlf = true;
2404        req->sso = true;
2405        req->ssow = true;
2406        req->timlfs = true;
2407        req->cptlfs = true;
2408
2409        return otx2_mbox_process(mbox);
2410}
2411
2412static bool
2413otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev)
2414{
2415        if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF ||
2416            pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
2417                return true;
2418        return false;
2419}
2420
2421static inline uint64_t
2422nix_get_blkaddr(struct otx2_eth_dev *dev)
2423{
2424        uint64_t reg;
2425
2426        /* Read the discovery register to find out which NIX block the
2427         * LF is attached to.
2428         */
2429        reg = otx2_read64(dev->bar2 +
2430                          RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_NIX0));
2431
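            /* A non-zero LF count in the low bits of the NIX0 discovery
             * register means this LF was provisioned on NIX0; otherwise it
             * must be on NIX1.
             */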
2432        return reg & 0x1FFULL ? RVU_BLOCK_ADDR_NIX0 : RVU_BLOCK_ADDR_NIX1;
2433}
2434
2435static int
2436otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
2437{
2438        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2439        struct rte_pci_device *pci_dev;
2440        int rc, max_entries;
2441
2442        eth_dev->dev_ops = &otx2_eth_dev_ops;
2443        eth_dev->rx_queue_count = otx2_nix_rx_queue_count;
2444        eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status;
2445        eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status;
2446
2447        /* For secondary processes, the primary has done all the work */
2448        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2449                /* Setup callbacks for secondary process */
2450                otx2_eth_set_tx_function(eth_dev);
2451                otx2_eth_set_rx_function(eth_dev);
2452                return 0;
2453        }
2454
2455        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2456
2457        rte_eth_copy_pci_info(eth_dev, pci_dev);
2458        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2459
2460        /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
2461        memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
2462                offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));
2463
2464        /* Parse devargs string */
2465        rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
2466        if (rc) {
2467                otx2_err("Failed to parse devargs rc=%d", rc);
2468                goto error;
2469        }
2470
2471        if (!dev->mbox_active) {
2472                /* Initialize the base otx2_dev object only if it has
2473                 * not already been initialized (mbox not yet active).
2474                 */
2475                rc = otx2_dev_init(pci_dev, dev);
2476                if (rc) {
2477                        otx2_err("Failed to initialize otx2_dev rc=%d", rc);
2478                        goto error;
2479                }
2480        }
2481        if (otx2_eth_dev_is_sdp(pci_dev))
2482                dev->sdp_link = true;
2483        else
2484                dev->sdp_link = false;
2485        /* Device generic callbacks */
2486        dev->ops = &otx2_dev_ops;
2487        dev->eth_dev = eth_dev;
2488
2489        /* Grab the NPA LF if required */
2490        rc = otx2_npa_lf_init(pci_dev, dev);
2491        if (rc)
2492                goto otx2_dev_uninit;
2493
2494        dev->configured = 0;
2495        dev->drv_inited = true;
2496        dev->ptype_disable = 0;
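
            /* BAR2 is carved into 1MB per-block regions (block address << 20);
             * the LMT block region below is used by the Tx path for LMT store
             * (LMTST) operations.
             */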
2497        dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
2498
2499        /* Attach NIX LF */
2500        rc = nix_lf_attach(dev);
2501        if (rc)
2502                goto otx2_npa_uninit;
2503
2504        dev->base = dev->bar2 + (nix_get_blkaddr(dev) << 20);
2505
2506        /* Get NIX MSIX offset */
2507        rc = nix_lf_get_msix_offset(dev);
2508        if (rc)
2509                goto otx2_npa_uninit;
2510
2511        /* Register LF irq handlers */
2512        rc = otx2_nix_register_irqs(eth_dev);
2513        if (rc)
2514                goto mbox_detach;
2515
2516        /* Get maximum number of supported MAC entries */
2517        max_entries = otx2_cgx_mac_max_entries_get(dev);
2518        if (max_entries < 0) {
2519                otx2_err("Failed to get max entries for mac addr");
2520                rc = -ENOTSUP;
2521                goto unregister_irq;
2522        }
2523
2524        /* For VFs, the returned max_entries will be 0, but one entry must
2525         * still be allocated to hold the default MAC address, so use 1.
2526         */
2527        if (max_entries == 0)
2528                max_entries = 1;
2529
2530        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
2531                                               RTE_ETHER_ADDR_LEN, 0);
2532        if (eth_dev->data->mac_addrs == NULL) {
2533                otx2_err("Failed to allocate memory for mac addr");
2534                rc = -ENOMEM;
2535                goto unregister_irq;
2536        }
2537
2538        dev->max_mac_entries = max_entries;
2539
2540        rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
2541        if (rc)
2542                goto free_mac_addrs;
2543
2544        /* Update the mac address */
2545        memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
2546
2547        /* Also sync same MAC address to CGX table */
2548        otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);
2549
2550        /* Initialize the tm data structures */
2551        otx2_nix_tm_conf_init(eth_dev);
2552
2553        dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
2554        dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
2555
2556        if (otx2_dev_is_96xx_A0(dev) ||
2557            otx2_dev_is_95xx_Ax(dev)) {
2558                dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
2559                dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
2560        }
2561
2562        /* Create security ctx */
2563        rc = otx2_eth_sec_ctx_create(eth_dev);
2564        if (rc)
2565                goto free_mac_addrs;
2566        dev->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
2567        dev->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
2568
2569        /* Initialize rte-flow */
2570        rc = otx2_flow_init(dev);
2571        if (rc)
2572                goto sec_ctx_destroy;
2573
2574        otx2_nix_mc_filter_init(dev);
2575
2576        otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
2577                     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
2578                     eth_dev->data->port_id, dev->pf, dev->vf,
2579                     OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
2580                     dev->rx_offload_capa, dev->tx_offload_capa);
2581        return 0;
2582
2583sec_ctx_destroy:
2584        otx2_eth_sec_ctx_destroy(eth_dev);
2585free_mac_addrs:
2586        rte_free(eth_dev->data->mac_addrs);
2587unregister_irq:
2588        otx2_nix_unregister_irqs(eth_dev);
2589mbox_detach:
2590        otx2_eth_dev_lf_detach(dev->mbox);
2591otx2_npa_uninit:
2592        otx2_npa_lf_fini();
2593otx2_dev_uninit:
2594        otx2_dev_fini(pci_dev, dev);
2595error:
2596        otx2_err("Failed to init nix eth_dev rc=%d", rc);
2597        return rc;
2598}
2599
2600static int
2601otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
2602{
2603        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2604        struct rte_pci_device *pci_dev;
2605        int rc, i;
2606
2607        /* Nothing to be done for secondary processes */
2608        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2609                return 0;
2610
2611        /* Clear the flag since we are closing down */
2612        dev->configured = 0;
2613
2614        /* Disable nix bpid config */
2615        otx2_nix_rxchan_bpid_cfg(eth_dev, false);
2616
2617        npc_rx_disable(dev);
2618
2619        /* Disable vlan offloads */
2620        otx2_nix_vlan_fini(eth_dev);
2621
2622        /* Disable other rte_flow entries */
2623        otx2_flow_fini(dev);
2624
2625        /* Free multicast filter list */
2626        otx2_nix_mc_filter_fini(dev);
2627
2628        /* Disable PTP if already enabled */
2629        if (otx2_ethdev_is_ptp_en(dev))
2630                otx2_nix_timesync_disable(eth_dev);
2631
2632        nix_cgx_stop_link_event(dev);
2633
2634        /* Unregister the dev ops; this is required to stop VFs from
2635         * receiving link status updates on the exit path.
2636         */
2637        dev->ops = NULL;
2638
2639        /* Free up SQs */
2640        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
2641                otx2_nix_tx_queue_release(eth_dev, i);
2642        eth_dev->data->nb_tx_queues = 0;
2643
2644        /* Free up RQs and CQs */
2645        for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
2646                otx2_nix_rx_queue_release(eth_dev, i);
2647        eth_dev->data->nb_rx_queues = 0;
2648
2649        /* Free tm resources */
2650        rc = otx2_nix_tm_fini(eth_dev);
2651        if (rc)
2652                otx2_err("Failed to cleanup tm, rc=%d", rc);
2653
2654        /* Unregister queue irqs */
2655        oxt2_nix_unregister_queue_irqs(eth_dev);
2656
2657        /* Unregister cq irqs */
2658        if (eth_dev->data->dev_conf.intr_conf.rxq)
2659                oxt2_nix_unregister_cq_irqs(eth_dev);
2660
2661        rc = nix_lf_free(dev);
2662        if (rc)
2663                otx2_err("Failed to free nix lf, rc=%d", rc);
2664
2665        rc = otx2_npa_lf_fini();
2666        if (rc)
2667                otx2_err("Failed to cleanup npa lf, rc=%d", rc);
2668
2669        /* Disable security */
2670        otx2_eth_sec_fini(eth_dev);
2671
2672        /* Destroy security ctx */
2673        otx2_eth_sec_ctx_destroy(eth_dev);
2674
2675        rte_free(eth_dev->data->mac_addrs);
2676        eth_dev->data->mac_addrs = NULL;
2677        dev->drv_inited = false;
2678
2679        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2680        otx2_nix_unregister_irqs(eth_dev);
2681
2682        rc = otx2_eth_dev_lf_detach(dev->mbox);
2683        if (rc)
2684                otx2_err("Failed to detach resources, rc=%d", rc);
2685
2686        /* Check if mbox close is needed */
2687        if (!mbox_close)
2688                return 0;
2689
2690        if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
2691                /* Will be freed later by PMD */
2692                eth_dev->data->dev_private = NULL;
2693                return 0;
2694        }
2695
2696        otx2_dev_fini(pci_dev, dev);
2697        return 0;
2698}
2699
2700static int
2701otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
2702{
2703        otx2_eth_dev_uninit(eth_dev, true);
2704        return 0;
2705}
2706
2707static int
2708otx2_nix_dev_reset(struct rte_eth_dev *eth_dev)
2709{
2710        int rc;
2711
2712        rc = otx2_eth_dev_uninit(eth_dev, false);
2713        if (rc)
2714                return rc;
2715
2716        return otx2_eth_dev_init(eth_dev);
2717}
2718
2719static int
2720nix_remove(struct rte_pci_device *pci_dev)
2721{
2722        struct rte_eth_dev *eth_dev;
2723        struct otx2_idev_cfg *idev;
2724        struct otx2_dev *otx2_dev;
2725        int rc;
2726
2727        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
2728        if (eth_dev) {
2729                /* Cleanup eth dev */
2730                rc = otx2_eth_dev_uninit(eth_dev, true);
2731                if (rc)
2732                        return rc;
2733
2734                rte_eth_dev_release_port(eth_dev);
2735        }
2736
2737        /* Nothing to be done for secondary processes */
2738        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2739                return 0;
2740
2741        /* Check for common resources */
2742        idev = otx2_intra_dev_get_cfg();
2743        if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
2744                return 0;
2745
2746        otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);
2747
2748        if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
2749                goto exit;
2750
2751        /* Safe to cleanup mbox as no more users */
2752        otx2_dev_fini(pci_dev, otx2_dev);
2753        rte_free(otx2_dev);
2754        return 0;
2755
2756exit:
2757        otx2_info("%s: common resource in use by other devices", pci_dev->name);
2758        return -EAGAIN;
2759}
2760
2761static int
2762nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
2763{
2764        int rc;
2765
2766        RTE_SET_USED(pci_drv);
2767
2768        rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
2769                                           otx2_eth_dev_init);
2770
2771        /* On error in a secondary process, recheck whether the port
2772         * exists in the primary or is in the middle of being detached.
2773         */
2774        if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
2775                if (!rte_eth_dev_allocated(pci_dev->device.name))
2776                        return 0;
2777        return rc;
2778}
2779
2780static const struct rte_pci_id pci_nix_map[] = {
2781        {
2782                RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
2783        },
2784        {
2785                RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
2786        },
2787        {
2788                RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
2789                               PCI_DEVID_OCTEONTX2_RVU_AF_VF)
2790        },
2791        {
2792                RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
2793                               PCI_DEVID_OCTEONTX2_RVU_SDP_PF)
2794        },
2795        {
2796                RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
2797                               PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
2798        },
2799        {
2800                .vendor_id = 0,
2801        },
2802};
2803
2804static struct rte_pci_driver pci_nix = {
2805        .id_table = pci_nix_map,
2806        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
2807                        RTE_PCI_DRV_INTR_LSC,
2808        .probe = nix_probe,
2809        .remove = nix_remove,
2810};
2811
2812RTE_PMD_REGISTER_PCI(OCTEONTX2_PMD, pci_nix);
2813RTE_PMD_REGISTER_PCI_TABLE(OCTEONTX2_PMD, pci_nix_map);
2814RTE_PMD_REGISTER_KMOD_DEP(OCTEONTX2_PMD, "vfio-pci");
2815