dpdk/drivers/net/cnxk/cn9k_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include "cn9k_ethdev.h"
#include "cn9k_rte_flow.h"
#include "cn9k_rx.h"
#include "cn9k_tx.h"

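/* Translate ethdev Rx offloads into NIX fast-path flag bits; these flags feed
 * cn9k_eth_set_rx_function(), which selects the matching Rx burst variant.
 */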
static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct rte_eth_conf *conf = &data->dev_conf;
        struct rte_eth_rxmode *rxmode = &conf->rxmode;
        uint16_t flags = 0;

        if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
            (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
                flags |= NIX_RX_OFFLOAD_RSS_F;

        if (dev->rx_offloads &
            (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
                flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

        if (dev->rx_offloads &
            (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
                flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

        if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
                flags |= NIX_RX_MULTI_SEG_F;

        if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
                flags |= NIX_RX_OFFLOAD_TSTAMP_F;

        if (!dev->ptype_disable)
                flags |= NIX_RX_OFFLOAD_PTYPE_F;

        if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
                flags |= NIX_RX_OFFLOAD_SECURITY_F;

        return flags;
}

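/* Translate ethdev Tx offloads into NIX fast-path flag bits. The
 * RTE_BUILD_BUG_ON() checks pin the mbuf layout and Tx flag encodings
 * that the fast-path Tx code depends on.
 */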
static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        uint64_t conf = dev->tx_offloads;
        uint16_t flags = 0;

        /* Fastpath is dependent on these enums */
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
        RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
        RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
        RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
        RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
                         offsetof(struct rte_mbuf, buf_iova) + 8);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
                         offsetof(struct rte_mbuf, buf_iova) + 16);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
                         offsetof(struct rte_mbuf, ol_flags) + 12);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
                         offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));

        if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
            conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
                flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

        if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
            conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
                flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

        if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
            conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
            conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
                flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

        if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
                flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;

        if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                flags |= NIX_TX_MULTI_SEG_F;

        /* Enable Inner checksum for TSO */
        if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
                flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);

        /* Enable Inner and Outer checksum for Tunnel TSO */
        if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
                    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
                flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
                          NIX_TX_OFFLOAD_L3_L4_CSUM_F);

        if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
                flags |= NIX_TX_OFFLOAD_TSTAMP_F;

        if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
                flags |= NIX_TX_OFFLOAD_SECURITY_F;

        return flags;
}

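/* dev_ptypes_set callback: a non-zero mask keeps packet type parsing enabled,
 * a zero mask disables it; the Rx burst function is re-selected accordingly.
 */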
static int
cn9k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        if (ptype_mask) {
                dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
                dev->ptype_disable = 0;
        } else {
                dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
                dev->ptype_disable = 1;
        }

        cn9k_eth_set_rx_function(eth_dev);
        return 0;
}

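/* Pre-build the constant part of the Tx command (SQE) for a single-segment
 * packet: send header, optional extension/timestamp sub-descriptors and the
 * SG sub-descriptor. w0.sizem1 is the command size in 16B units minus one.
 */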
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
                      uint16_t qid)
{
        struct nix_send_ext_s *send_hdr_ext;
        struct nix_send_hdr_s *send_hdr;
        struct nix_send_mem_s *send_mem;
        union nix_send_sg_s *sg;

        /* Initialize the fields based on basic single segment packet */
        memset(&txq->cmd, 0, sizeof(txq->cmd));

        if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
                send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
                /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
                send_hdr->w0.sizem1 = 2;

                send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
                send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
                if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
                        /* Default: one seg packet would have:
                         * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
                         * => 8/2 - 1 = 3
                         */
                        send_hdr->w0.sizem1 = 3;
                        send_hdr_ext->w0.tstmp = 1;

                        /* To calculate the offset for send_mem,
                         * send_hdr->w0.sizem1 * 2
                         */
                        send_mem = (struct nix_send_mem_s *)
                                (txq->cmd + (send_hdr->w0.sizem1 << 1));
                        send_mem->w0.cn9k.subdc = NIX_SUBDC_MEM;
                        send_mem->w0.cn9k.alg = NIX_SENDMEMALG_SETTSTMP;
                        send_mem->addr = dev->tstamp.tx_tstamp_iova;
                }
                sg = (union nix_send_sg_s *)&txq->cmd[4];
        } else {
                send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
                /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
                send_hdr->w0.sizem1 = 1;
                sg = (union nix_send_sg_s *)&txq->cmd[2];
        }

        send_hdr->w0.sq = qid;
        sg->subdc = NIX_SUBDC_SG;
        sg->segs = 1;
        sg->ld_type = NIX_SENDLDTYPE_LDD;

        rte_wmb();
}

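/* cn9k-specific Tx queue setup: run the common cnxk setup, then cache the
 * SQ fast-path fields and, when inline outbound security is in use, the
 * CPT LF details in the per-queue structure.
 */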
static int
cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                        uint16_t nb_desc, unsigned int socket,
                        const struct rte_eth_txconf *tx_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_cpt_lf *inl_lf;
        struct cn9k_eth_txq *txq;
        struct roc_nix_sq *sq;
        uint16_t crypto_qid;
        int rc;

        RTE_SET_USED(socket);

        /* Common Tx queue setup */
        rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
                                     sizeof(struct cn9k_eth_txq), tx_conf);
        if (rc)
                return rc;

        sq = &dev->sqs[qid];
        /* Update fast path queue */
        txq = eth_dev->data->tx_queues[qid];
        txq->fc_mem = sq->fc;
        txq->lmt_addr = sq->lmt_addr;
        txq->io_addr = sq->io_addr;
        txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
        txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

        /* Fetch CPT LF info for outbound if present */
        if (dev->outb.lf_base) {
                crypto_qid = qid % dev->outb.nb_crypto_qs;
                inl_lf = dev->outb.lf_base + crypto_qid;

                txq->cpt_io_addr = inl_lf->io_addr;
                txq->cpt_fc = inl_lf->fc_addr;
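                /* Use ~70% of the CPT LF descriptors for this queue; the
                 * crypto queue may be shared by several SQs (qid modulo
                 * nb_crypto_qs above), so this presumably leaves headroom.
                 */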
                txq->cpt_desc = inl_lf->nb_desc * 0.7;
                txq->sa_base = (uint64_t)dev->outb.sa_base;
                txq->sa_base |= eth_dev->data->port_id;
                PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
        }

        nix_form_default_desc(dev, txq, qid);
        txq->lso_tun_fmt = dev->lso_tun_fmt;
        return 0;
}

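/* cn9k-specific Rx queue setup: run the common cnxk setup, then cache the CQ
 * ring base, doorbell and status addresses needed by the Rx fast path.
 */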
static int
cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                        uint16_t nb_desc, unsigned int socket,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct cn9k_eth_rxq *rxq;
        struct roc_nix_rq *rq;
        struct roc_nix_cq *cq;
        int rc;

        RTE_SET_USED(socket);

        /* CQ Errata needs min 4K ring */
        if (dev->cq_min_4k && nb_desc < 4096)
                nb_desc = 4096;

        /* Common Rx queue setup */
        rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
                                     sizeof(struct cn9k_eth_rxq), rx_conf, mp);
        if (rc)
                return rc;

        rq = &dev->rqs[qid];
        cq = &dev->cqs[qid];

        /* Update fast path queue */
        rxq = eth_dev->data->rx_queues[qid];
        rxq->rq = qid;
        rxq->desc = (uintptr_t)cq->desc_base;
        rxq->cq_door = cq->door;
        rxq->cq_status = cq->status;
        rxq->wdata = cq->wdata;
        rxq->head = cq->head;
        rxq->qmask = cq->qmask;
        rxq->tstamp = &dev->tstamp;

        /* Data offset from data to start of mbuf is first_skip */
        rxq->data_off = rq->first_skip;
        rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);

        /* Lookup mem */
        rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
        return 0;
}

static int
cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
        struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
        int rc;

        rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
        if (rc)
                return rc;

        /* Clear fc cache pkts to trigger worker stop */
        txq->fc_cache_pkts = 0;
        return 0;
}

static int
cn9k_nix_configure(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_conf *conf = &eth_dev->data->dev_conf;
        struct rte_eth_txmode *txmode = &conf->txmode;
        int rc;

        /* Platform specific checks */
        if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
            (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
            ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
             (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
                plt_err("Outer IP and SCTP checksum unsupported");
                return -EINVAL;
        }

        /* Common nix configure */
        rc = cnxk_nix_configure(eth_dev);
        if (rc)
                return rc;

        /* Update offload flags */
        dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
        dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);

        plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
                    " tx_offload_flags=0x%x",
                    eth_dev->data->port_id, dev->rx_offload_flags,
                    dev->tx_offload_flags);
        return 0;
}

/* Function to enable ptp config for VFs */
static void
nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        if (nix_recalc_mtu(eth_dev))
                plt_err("Failed to set MTU size for ptp");

        dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;

        /* Setting up the function pointers as per new offload flags */
        cn9k_eth_set_rx_function(eth_dev);
        cn9k_eth_set_tx_function(eth_dev);
}

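/* Dummy Rx burst handler installed by the PTP info callback below; it applies
 * the PTP configuration from the VF's own context (outside the PF->VF mbox
 * handler) and never returns any packets.
 */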
static uint16_t
nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
        struct cn9k_eth_rxq *rxq = queue;
        struct cnxk_eth_rxq_sp *rxq_sp;
        struct rte_eth_dev *eth_dev;

        RTE_SET_USED(mbufs);
        RTE_SET_USED(pkts);

        rxq_sp = cnxk_eth_rxq_to_sp(rxq);
        eth_dev = rxq_sp->dev->eth_dev;
        nix_ptp_enable_vf(eth_dev);

        return 0;
}

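/* Callback registered with ROC; invoked (via PF->VF mbox) when PTP state
 * changes on the PF. Refreshes each Rx queue's mbuf initializer and, for VFs,
 * defers the remaining PTP setup to the dummy burst handler above.
 */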
static int
cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
{
        struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
        struct rte_eth_dev *eth_dev;
        struct cn9k_eth_rxq *rxq;
        int i;

        if (!dev)
                return -EINVAL;

        eth_dev = dev->eth_dev;
        if (!eth_dev)
                return -EINVAL;

        dev->ptp_en = ptp_en;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                rxq = eth_dev->data->rx_queues[i];
                rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
        }

        if (roc_nix_is_vf_or_sdp(nix) && !(roc_nix_is_sdp(nix)) &&
            !(roc_nix_is_lbk(nix))) {
                /* In case of VF, setting of MTU cannot be done directly in this
                 * function as this is running as part of MBOX request(PF->VF)
                 * and MTU setting also requires MBOX message to be
                 * sent(VF->PF)
                 */
                eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
                rte_mb();
        }

        return 0;
}

static int
cn9k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int i, rc;

        rc = cnxk_nix_timesync_enable(eth_dev);
        if (rc)
                return rc;

        dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
        dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

        /* Setting up the rx[tx]_offload_flags due to change
         * in rx[tx]_offloads.
         */
        cn9k_eth_set_rx_function(eth_dev);
        cn9k_eth_set_tx_function(eth_dev);
        return 0;
}

static int
cn9k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int i, rc;

        rc = cnxk_nix_timesync_disable(eth_dev);
        if (rc)
                return rc;

        dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
        dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

        /* Setting up the rx[tx]_offload_flags due to change
         * in rx[tx]_offloads.
         */
        cn9k_eth_set_rx_function(eth_dev);
        cn9k_eth_set_tx_function(eth_dev);
        return 0;
}

static int
cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        /* Common eth dev start */
        rc = cnxk_nix_dev_start(eth_dev);
        if (rc)
                return rc;

        /* Update VF about data off shifted by 8 bytes if PTP already
         * enabled in PF owning this VF
         */
        if (dev->ptp_en && (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix))))
                nix_ptp_enable_vf(eth_dev);

        /* Setting up the rx[tx]_offload_flags due to change
         * in rx[tx]_offloads.
         */
        dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
        dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

        cn9k_eth_set_tx_function(eth_dev);
        cn9k_eth_set_rx_function(eth_dev);
        return 0;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
        static int init_once;

        if (init_once)
                return;
        init_once = 1;

        /* Update platform specific ops */
        cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
        cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
        cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
        cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
        cnxk_eth_dev_ops.dev_start = cn9k_nix_dev_start;
        cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
        cnxk_eth_dev_ops.timesync_enable = cn9k_nix_timesync_enable;
        cnxk_eth_dev_ops.timesync_disable = cn9k_nix_timesync_disable;
        cnxk_eth_dev_ops.mtr_ops_get = NULL;
}

static void
npc_flow_ops_override(void)
{
        static int init_once;

        if (init_once)
                return;
        init_once = 1;

        /* Update platform specific ops */
        cnxk_flow_ops.create = cn9k_flow_create;
        cnxk_flow_ops.destroy = cn9k_flow_destroy;
}

static int
cn9k_nix_remove(struct rte_pci_device *pci_dev)
{
        return cnxk_nix_remove(pci_dev);
}

static int
cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *eth_dev;
        struct cnxk_eth_dev *dev;
        int rc;

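        /* CN9K has 128-byte cache lines; refuse to run a build configured
         * for a different cache line size.
         */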
        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc) {
                plt_err("Failed to initialize platform model, rc=%d", rc);
                return rc;
        }

        nix_eth_dev_ops_override();
        npc_flow_ops_override();

        cn9k_eth_sec_ops_override();

        /* Common probe */
        rc = cnxk_nix_probe(pci_drv, pci_dev);
        if (rc)
                return rc;

        /* Find eth dev allocated */
        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!eth_dev)
                return -ENOENT;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                /* Setup callbacks for secondary process */
                cn9k_eth_set_tx_function(eth_dev);
                cn9k_eth_set_rx_function(eth_dev);
                return 0;
        }

        dev = cnxk_eth_pmd_priv(eth_dev);
        /* Update capabilities already set for TSO.
         * TSO not supported for earlier chip revisions
         */
        if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
                dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
                                          RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
                                          RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
                                          RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);

        /* 50G and 100G to be supported for board version C0
         * and above of CN9K.
         */
        if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
                dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
                dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
        }

        dev->hwcap = 0;

        /* Register up msg callbacks for PTP information */
        roc_nix_ptp_info_cb_register(&dev->nix, cn9k_nix_ptp_info_update_cb);

        /* Apply workarounds for HW errata */
        if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
                dev->cq_min_4k = 1;
        return 0;
}

static const struct rte_pci_id cn9k_pci_nix_map[] = {
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_nix = {
        .id_table = cn9k_pci_nix_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = cn9k_nix_probe,
        .remove = cn9k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn9k, cn9k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn9k, cn9k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn9k, "vfio-pci");