dpdk/drivers/net/cnxk/cn9k_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include "cn9k_ethdev.h"
#include "cn9k_flow.h"
#include "cn9k_rx.h"
#include "cn9k_tx.h"

static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct rte_eth_conf *conf = &data->dev_conf;
        struct rte_eth_rxmode *rxmode = &conf->rxmode;
        uint16_t flags = 0;

        if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
            (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
                flags |= NIX_RX_OFFLOAD_RSS_F;

        if (dev->rx_offloads &
            (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
                flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

        if (dev->rx_offloads &
            (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
                flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

        if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
                flags |= NIX_RX_MULTI_SEG_F;

        if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
                flags |= NIX_RX_OFFLOAD_TSTAMP_F;

        if (!dev->ptype_disable)
                flags |= NIX_RX_OFFLOAD_PTYPE_F;

        if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
                flags |= NIX_RX_OFFLOAD_SECURITY_F;

        if (dev->rx_mark_update)
                flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;

        return flags;
}
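
/*
 * Illustrative sketch (hypothetical application code, not part of this
 * driver): the flags above are derived from the ethdev Rx configuration,
 * e.g.
 *
 *        struct rte_eth_conf conf = {0};
 *
 *        conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 *        conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_RSS_HASH |
 *                               RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 *        rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * would yield NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_CHECKSUM_F, plus
 * NIX_RX_OFFLOAD_PTYPE_F unless ptype parsing has been disabled.
 */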

static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        uint64_t conf = dev->tx_offloads;
        uint16_t flags = 0;

        /* Fastpath is dependent on these enums */
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
        RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
        RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
        RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
        RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
        RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
                         offsetof(struct rte_mbuf, buf_iova) + 8);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
                         offsetof(struct rte_mbuf, buf_iova) + 16);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
                         offsetof(struct rte_mbuf, ol_flags) + 12);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
                         offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
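
        /*
         * The asserts above pin the Tx offload flag encodings and the
         * rte_mbuf field layout that the cn9k fast path assumes; any change
         * to those definitions must be reflected in the fast path code.
         */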

        if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
            conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
                flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

        if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
            conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
                flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

        if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
            conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
            conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
                flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

        if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
                flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;

        if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                flags |= NIX_TX_MULTI_SEG_F;

        /* Enable Inner checksum for TSO */
        if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
                flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);

        /* Enable Inner and Outer checksum for Tunnel TSO */
        if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
                    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
                flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
                          NIX_TX_OFFLOAD_L3_L4_CSUM_F);

        if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
                flags |= NIX_TX_OFFLOAD_TSTAMP_F;

        if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
                flags |= NIX_TX_OFFLOAD_SECURITY_F;

        if (dev->tx_mark)
                flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

        return flags;
}

static int
cn9k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        if (ptype_mask) {
                dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
                dev->ptype_disable = 0;
        } else {
                dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
                dev->ptype_disable = 1;
        }

        cn9k_eth_set_rx_function(eth_dev);
        return 0;
}
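
/*
 * Illustrative sketch (hypothetical application code): ptype parsing can be
 * turned off to take the cheaper Rx path, which lands in the callback above:
 *
 *        rte_eth_dev_set_ptypes(port_id, RTE_PTYPE_UNKNOWN, NULL, 0);
 */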

static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
                      uint16_t qid)
{
        union nix_send_hdr_w0_u send_hdr_w0;

        /* Initialize the fields for a basic single-segment packet */
        send_hdr_w0.u = 0;
        if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
                /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
                send_hdr_w0.sizem1 = 2;
                if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
                        /* Default: one seg packet would have:
                         * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
                         * => 8/2 - 1 = 3
                         */
                        send_hdr_w0.sizem1 = 3;

                        /* The send_mem offset within the descriptor is
                         * derived from send_hdr->w0.sizem1 * 2.
                         */
                        txq->ts_mem = dev->tstamp.tx_tstamp_iova;
                }
        } else {
                /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
                send_hdr_w0.sizem1 = 1;
        }
        send_hdr_w0.sq = qid;
        txq->send_hdr_w0 = send_hdr_w0.u;
        rte_wmb();
}

static int
cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                        uint16_t nb_desc, unsigned int socket,
                        const struct rte_eth_txconf *tx_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        uint64_t mark_fmt, mark_flag;
        struct roc_cpt_lf *inl_lf;
        struct cn9k_eth_txq *txq;
        struct roc_nix_sq *sq;
        uint16_t crypto_qid;
        int rc;

        RTE_SET_USED(socket);

        /* Common Tx queue setup */
        rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
                                     sizeof(struct cn9k_eth_txq), tx_conf);
        if (rc)
                return rc;

        sq = &dev->sqs[qid];
        /* Update fast path queue */
        txq = eth_dev->data->tx_queues[qid];
        txq->fc_mem = sq->fc;
        txq->lmt_addr = sq->lmt_addr;
        txq->io_addr = sq->io_addr;
        txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
        txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

        /* Fetch CPT LF info for outbound if present */
        if (dev->outb.lf_base) {
                crypto_qid = qid % dev->outb.nb_crypto_qs;
                inl_lf = dev->outb.lf_base + crypto_qid;

                txq->cpt_io_addr = inl_lf->io_addr;
                txq->cpt_fc = inl_lf->fc_addr;
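                /* Expose only ~70% of the CPT LF descriptors to the fast
                 * path, presumably to leave headroom for in-flight requests.
                 */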
                txq->cpt_desc = inl_lf->nb_desc * 0.7;
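                /* The SA base is 64KB-aligned (asserted below), so the
                 * port_id can be stashed in its low bits for fast path use.
                 */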
                txq->sa_base = (uint64_t)dev->outb.sa_base;
                txq->sa_base |= eth_dev->data->port_id;
                PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
        }

        mark_fmt = roc_nix_tm_mark_format_get(&dev->nix, &mark_flag);
        txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
        txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;

        nix_form_default_desc(dev, txq, qid);
        txq->lso_tun_fmt = dev->lso_tun_fmt;
        return 0;
}

static int
cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                        uint16_t nb_desc, unsigned int socket,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct cn9k_eth_rxq *rxq;
        struct roc_nix_rq *rq;
        struct roc_nix_cq *cq;
        int rc;

        RTE_SET_USED(socket);

        /* A CQ hardware erratum requires a minimum 4K ring */
        if (dev->cq_min_4k && nb_desc < 4096)
                nb_desc = 4096;

        /* Common Rx queue setup */
        rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
                                     sizeof(struct cn9k_eth_rxq), rx_conf, mp);
        if (rc)
                return rc;

        /* Do initial MTU setup for RQ0 before device start */
        if (!qid) {
                rc = nix_recalc_mtu(eth_dev);
                if (rc)
                        return rc;

                /* Update offload flags */
                dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
                dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);
        }

        rq = &dev->rqs[qid];
        cq = &dev->cqs[qid];

        /* Update fast path queue */
        rxq = eth_dev->data->rx_queues[qid];
        rxq->rq = qid;
        rxq->desc = (uintptr_t)cq->desc_base;
        rxq->cq_door = cq->door;
        rxq->cq_status = cq->status;
        rxq->wdata = cq->wdata;
        rxq->head = cq->head;
        rxq->qmask = cq->qmask;
        rxq->tstamp = &dev->tstamp;

        /* Data offset from the start of the mbuf to the packet data is first_skip */
        rxq->data_off = rq->first_skip;
        rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);

        /* Lookup mem */
        rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
        return 0;
}
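
/*
 * Illustrative sketch (hypothetical application code) of the common queue
 * setup calls that land in the two callbacks above:
 *
 *        rte_eth_rx_queue_setup(port_id, 0, 4096, SOCKET_ID_ANY, NULL, mp);
 *        rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, NULL);
 */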

static int
cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
        struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
        int rc;

        rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
        if (rc)
                return rc;

        /* Clear fc cache pkts to trigger worker stop */
        txq->fc_cache_pkts = 0;
        return 0;
}

static int
cn9k_nix_configure(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_conf *conf = &eth_dev->data->dev_conf;
        struct rte_eth_txmode *txmode = &conf->txmode;
        int rc;

        /* Platform specific checks */
        if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
            (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
            ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
             (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
                plt_err("Outer IP and SCTP checksum unsupported");
                return -EINVAL;
        }

        /* Common nix configure */
        rc = cnxk_nix_configure(eth_dev);
        if (rc)
                return rc;

        /* Update offload flags */
        dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
        dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);

        plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
                    " tx_offload_flags=0x%x",
                    eth_dev->data->port_id, dev->rx_offload_flags,
                    dev->tx_offload_flags);
        return 0;
}

/* Function to enable PTP config for VFs */
static void
nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        if (nix_recalc_mtu(eth_dev))
                plt_err("Failed to set MTU size for ptp");

        dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;

        /* Set up the function pointers as per the new offload flags */
        cn9k_eth_set_rx_function(eth_dev);
        cn9k_eth_set_tx_function(eth_dev);
}

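/* Dummy Rx burst handler installed while a VF PTP enable is pending: the
 * first poll applies the new MTU and timestamp offload settings from lcore
 * context and then restores the real Rx burst function.
 */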
static uint16_t
nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
        struct cn9k_eth_rxq *rxq = queue;
        struct cnxk_eth_rxq_sp *rxq_sp;
        struct rte_eth_dev *eth_dev;

        RTE_SET_USED(mbufs);
        RTE_SET_USED(pkts);

        rxq_sp = cnxk_eth_rxq_to_sp(rxq);
        eth_dev = rxq_sp->dev->eth_dev;
        nix_ptp_enable_vf(eth_dev);

        return 0;
}

static int
cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
{
        struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
        struct rte_eth_dev *eth_dev;
        struct cn9k_eth_rxq *rxq;
        int i;

        if (!dev)
                return -EINVAL;

        eth_dev = dev->eth_dev;
        if (!eth_dev)
                return -EINVAL;

        dev->ptp_en = ptp_en;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                rxq = eth_dev->data->rx_queues[i];
                rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
        }

        if (roc_nix_is_vf_or_sdp(nix) && !(roc_nix_is_sdp(nix)) &&
            !(roc_nix_is_lbk(nix))) {
                /* For a VF, the MTU cannot be set directly in this function
                 * as it runs in the context of an MBOX request (PF->VF),
                 * while setting the MTU itself requires another MBOX
                 * message to be sent (VF->PF).
                 */
                eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
                rte_mb();
        }

        return 0;
}

static int
cn9k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int i, rc;

        rc = cnxk_nix_timesync_enable(eth_dev);
        if (rc)
                return rc;

        dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
        dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

        /* Update the Rx/Tx burst functions to match the new offload flags */
        cn9k_eth_set_rx_function(eth_dev);
        cn9k_eth_set_tx_function(eth_dev);
        return 0;
}

static int
cn9k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int i, rc;

        rc = cnxk_nix_timesync_disable(eth_dev);
        if (rc)
                return rc;

        dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
        dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);

        /* Update the Rx/Tx burst functions to match the new offload flags */
        cn9k_eth_set_rx_function(eth_dev);
        cn9k_eth_set_tx_function(eth_dev);
        return 0;
}
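
/*
 * Illustrative sketch (hypothetical application code) of the IEEE 1588 flow
 * driven by the timesync callbacks above:
 *
 *        struct timespec ts;
 *
 *        rte_eth_timesync_enable(port_id);
 *        ... transmit a PTP packet with RTE_MBUF_F_TX_IEEE1588_TMST set ...
 *        if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
 *                ... consume ts ...
 *        rte_eth_timesync_disable(port_id);
 */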

static int
cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        /* Common eth dev start */
        rc = cnxk_nix_dev_start(eth_dev);
        if (rc)
                return rc;

        /* If PTP is already enabled in the PF owning this VF, update the
         * VF about the data offset being shifted by 8 bytes.
         */
        if (dev->ptp_en && (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix))))
                nix_ptp_enable_vf(eth_dev);

        /* Update rx/tx offload flags to reflect any change in rx/tx offloads */
        dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
        dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

        cn9k_eth_set_tx_function(eth_dev);
        cn9k_eth_set_rx_function(eth_dev);
        return 0;
}

static int
cn9k_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
                                    struct timespec *timestamp)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct cnxk_timesync_info *tstamp = &dev->tstamp;
        uint64_t ns;

        if (*tstamp->tx_tstamp == 0)
                return -EINVAL;

        ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp);
        *timestamp = rte_ns_to_timespec(ns);
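        /* Clear the timestamp slot so the next Tx timestamp can be posted */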
        *tstamp->tx_tstamp = 0;
        rte_wmb();

        return 0;
}

static int
cn9k_nix_rx_metadata_negotiate(struct rte_eth_dev *eth_dev, uint64_t *features)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        *features &=
                (RTE_ETH_RX_METADATA_USER_FLAG | RTE_ETH_RX_METADATA_USER_MARK);

        if (*features) {
                dev->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
                dev->rx_mark_update = true;
        } else {
                dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
                dev->rx_mark_update = false;
        }

        cn9k_eth_set_rx_function(eth_dev);

        return 0;
}

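/*
 * Illustrative sketch (hypothetical application code): Rx metadata delivery
 * must be negotiated before the first rte_eth_dev_configure() call, e.g.
 *
 *        uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *                            RTE_ETH_RX_METADATA_USER_MARK;
 *
 *        rte_eth_rx_metadata_negotiate(port_id, &features);
 */

/* The three TM marking callbacks below share one pattern: apply the common
 * cnxk marking update, then refresh the cached mark format/flag on each Tx
 * queue and re-select the Tx burst function.
 */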
static int
cn9k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
                          int mark_yellow, int mark_red,
                          struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *roc_nix = &dev->nix;
        uint64_t mark_fmt, mark_flag;
        int rc, i;

        rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
                                       mark_red, error);

        if (rc)
                goto exit;

        mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
        if (mark_flag) {
                dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
                dev->tx_mark = true;
        } else {
                dev->tx_mark = false;
                if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
                      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
                        dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
        }

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];

                txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
                txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
        }
        cn9k_eth_set_tx_function(eth_dev);
exit:
        return rc;
}

static int
cn9k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
                        int mark_yellow, int mark_red,
                        struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *roc_nix = &dev->nix;
        uint64_t mark_fmt, mark_flag;
        int rc, i;

        rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
                                     error);
        if (rc)
                goto exit;

        mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
        if (mark_flag) {
                dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
                dev->tx_mark = true;
        } else {
                dev->tx_mark = false;
                if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
                      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
                        dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
        }

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];

                txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
                txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
        }
        cn9k_eth_set_tx_function(eth_dev);
exit:
        return rc;
}

static int
cn9k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
                         int mark_yellow, int mark_red,
                         struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *roc_nix = &dev->nix;
        uint64_t mark_fmt, mark_flag;
        int rc, i;

        rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
                                      mark_red, error);
        if (rc)
                goto exit;

        mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
        if (mark_flag) {
                dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
                dev->tx_mark = true;
        } else {
                dev->tx_mark = false;
                if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
                      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
                        dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
        }

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];

                txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
                txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
        }
        cn9k_eth_set_tx_function(eth_dev);
exit:
        return rc;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
        static int init_once;

        if (init_once)
                return;
        init_once = 1;

        /* Update platform specific ops */
        cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
        cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
        cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
        cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
        cnxk_eth_dev_ops.dev_start = cn9k_nix_dev_start;
        cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
        cnxk_eth_dev_ops.timesync_enable = cn9k_nix_timesync_enable;
        cnxk_eth_dev_ops.timesync_disable = cn9k_nix_timesync_disable;
        cnxk_eth_dev_ops.mtr_ops_get = NULL;
        cnxk_eth_dev_ops.rx_metadata_negotiate = cn9k_nix_rx_metadata_negotiate;
        cnxk_eth_dev_ops.timesync_read_tx_timestamp =
                cn9k_nix_timesync_read_tx_timestamp;
}

/* Update platform specific TM ops */
static void
nix_tm_ops_override(void)
{
        static int init_once;

        if (init_once)
                return;
        init_once = 1;

        /* Update platform specific ops */
        cnxk_tm_ops.mark_vlan_dei = cn9k_nix_tm_mark_vlan_dei;
        cnxk_tm_ops.mark_ip_ecn = cn9k_nix_tm_mark_ip_ecn;
        cnxk_tm_ops.mark_ip_dscp = cn9k_nix_tm_mark_ip_dscp;
}

static void
npc_flow_ops_override(void)
{
        static int init_once;

        if (init_once)
                return;
        init_once = 1;

        /* Update platform specific ops */
        cnxk_flow_ops.create = cn9k_flow_create;
        cnxk_flow_ops.destroy = cn9k_flow_destroy;
}

static int
cn9k_nix_remove(struct rte_pci_device *pci_dev)
{
        return cnxk_nix_remove(pci_dev);
}

static int
cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *eth_dev;
        struct cnxk_eth_dev *dev;
        int rc;

        rc = roc_plt_init();
        if (rc) {
                plt_err("Failed to initialize platform model, rc=%d", rc);
                return rc;
        }

        nix_eth_dev_ops_override();
        nix_tm_ops_override();
        npc_flow_ops_override();

        cn9k_eth_sec_ops_override();

        /* Common probe */
        rc = cnxk_nix_probe(pci_drv, pci_dev);
        if (rc)
                return rc;

        /* Find eth dev allocated */
        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!eth_dev) {
                /* Ignore if the ethdev is mid-detach in the secondary process */
                if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                        return 0;
                return -ENOENT;
        }

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                /* Setup callbacks for secondary process */
                cn9k_eth_set_tx_function(eth_dev);
                cn9k_eth_set_rx_function(eth_dev);
                return 0;
        }

        dev = cnxk_eth_pmd_priv(eth_dev);
        /* Update capabilities already set for TSO.
         * TSO is not supported on earlier chip revisions.
         */
        if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
                dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
                                          RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
                                          RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
                                          RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);

        /* 50G and 100G are supported only on CN9K board versions C0
         * and above.
         */
        if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
                dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
                dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
        }

        dev->hwcap = 0;
        dev->inb.no_inl_dev = 1;

        /* Register mbox up-message callback for PTP information */
        roc_nix_ptp_info_cb_register(&dev->nix, cn9k_nix_ptp_info_update_cb);

        /* Apply HW errata workarounds */
        if (roc_errata_nix_has_cq_min_size_4k())
                dev->cq_min_4k = 1;

        if (dev->nix.custom_sa_action) {
                dev->nix.custom_sa_action = 0;
                plt_info("WARNING: Custom SA action is enabled but not"
                         " supported on cn9k device; disabling it");
        }
        return 0;
}

static const struct rte_pci_id cn9k_pci_nix_map[] = {
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF9KA, PCI_DEVID_CNXK_RVU_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF9KA, PCI_DEVID_CNXK_RVU_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_AF_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_AF_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_AF_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_AF_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_AF_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF9KA, PCI_DEVID_CNXK_RVU_AF_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SDP_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SDP_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SDP_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SDP_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SDP_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF9KA, PCI_DEVID_CNXK_RVU_SDP_VF),
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_nix = {
        .id_table = cn9k_pci_nix_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = cn9k_nix_probe,
        .remove = cn9k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn9k, cn9k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn9k, cn9k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn9k, "vfio-pci");