dpdk/drivers/net/cxgbe/cxgbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"
#include "cxgbe_flow.h"

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
                { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { .vendor_id = 0, } \
        }

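/*
 * For illustration only: with the macros above, every
 * CH_PCI_ID_TABLE_ENTRY(devid) listed in base/t4_pci_id_tbl.h expands to
 * one row of cxgb4_pci_tbl. For a hypothetical device id 0x5401, the
 * generated table looks roughly like:
 *
 *      static const struct rte_pci_id cxgb4_pci_tbl[] = {
 *              { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (0x5401)) },
 *              ...
 *              { .vendor_id = 0, },
 *      };
 */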
/*
 *... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"

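/*
 * Transmit burst handler: takes the Tx queue lock, reclaims descriptors
 * already freed by completed sends, then hands packets to t4_eth_xmit()
 * one at a time, prefetching the next mbuf ahead of each send. Returns
 * the number of packets actually queued, which can be less than nb_pkts
 * if the descriptor ring fills up.
 */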
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts)
{
        struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
        uint16_t pkts_sent, pkts_remain;
        uint16_t total_sent = 0;
        uint16_t idx = 0;
        int ret = 0;

        t4_os_lock(&txq->txq_lock);
        /* free up desc from already completed tx */
        reclaim_completed_tx(&txq->q);
        rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
        while (total_sent < nb_pkts) {
                pkts_remain = nb_pkts - total_sent;

                for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
                        idx = total_sent + pkts_sent;
                        if ((idx + 1) < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
                                                        volatile void *));
                        ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
                        if (ret < 0)
                                break;
                }
                if (!pkts_sent)
                        break;
                total_sent += pkts_sent;
                /* reclaim as much as possible */
                reclaim_completed_tx(&txq->q);
        }

        t4_os_unlock(&txq->txq_lock);
        return total_sent;
}

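/*
 * Receive burst handler: a thin wrapper around cxgbe_poll() on the
 * queue's response queue; work_done reports how many packets were placed
 * into rx_pkts. Note that the first argument to the dev_err()/dev_debug()
 * logging macros in this driver is unused by their expansion, which is
 * why calls in this file can pass names like "adapter" that are not in
 * scope here, or simply NULL.
 */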
uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
{
        struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
        unsigned int work_done;

        if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
                dev_err(adapter, "error in cxgbe poll\n");

        return work_done;
}

int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
                        struct rte_eth_dev_info *device_info)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

        static const struct rte_eth_desc_lim cxgbe_desc_lim = {
                .nb_max = CXGBE_MAX_RING_DESC_SIZE,
                .nb_min = CXGBE_MIN_RING_DESC_SIZE,
                .nb_align = 1,
        };

        device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
        device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
        device_info->max_rx_queues = max_queues;
        device_info->max_tx_queues = max_queues;
        device_info->max_mac_addrs = 1;
        /* XXX: For now we support one MAC/port */
        device_info->max_vfs = adapter->params.arch.vfcount;
        device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

        device_info->rx_queue_offload_capa = 0UL;
        device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

        device_info->tx_queue_offload_capa = 0UL;
        device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

        device_info->reta_size = pi->rss_size;
        device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
        device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

        device_info->rx_desc_lim = cxgbe_desc_lim;
        device_info->tx_desc_lim = cxgbe_desc_lim;
        cxgbe_get_speed_caps(pi, &device_info->speed_capa);

        return 0;
}

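/*
 * The four rxmode helpers below program the VI's Rx filter through
 * t4_set_rxmode(). In this firmware API a -1 argument means "leave that
 * setting (mtu/promisc/all-multi/broadcast/vlan-extraction) unchanged",
 * so each helper changes only the promiscuous or all-multicast flag it
 * owns while keeping broadcast enabled.
 */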
int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             1, -1, 1, -1, false);
}

int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             0, -1, 1, -1, false);
}

int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        /* TODO: address filters ?? */

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             -1, 1, 1, -1, false);
}

int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        /* TODO: address filters ?? */

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             -1, 0, 1, -1, false);
}

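/*
 * Link update: drain link events from the firmware event queue (up to
 * CXGBE_LINK_STATUS_POLL_CNT polls of "budget" entries each when
 * wait_to_complete is set), then publish the result via
 * rte_eth_linkstatus_set(), honouring the force-linkup devarg.
 */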
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
                          int wait_to_complete)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct rte_eth_link new_link = { 0 };
        unsigned int i, work_done, budget = 32;
        u8 old_link = pi->link_cfg.link_ok;

        for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
                if (!s->fw_evtq.desc)
                        break;

                cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

                /* Exit if link status changed or always forced up */
                if (pi->link_cfg.link_ok != old_link ||
                    cxgbe_force_linkup(adapter))
                        break;

                if (!wait_to_complete)
                        break;

                rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
        }

        new_link.link_status = cxgbe_force_linkup(adapter) ?
                               ETH_LINK_UP : pi->link_cfg.link_ok;
        new_link.link_autoneg = pi->link_cfg.autoneg;
        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        new_link.link_speed = pi->link_cfg.speed;

        return rte_eth_linkstatus_set(eth_dev, &new_link);
}

/**
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        unsigned int work_done, budget = 32;
        struct sge *s = &adapter->sge;
        int ret;

        if (!s->fw_evtq.desc)
                return -ENOMEM;

        /* Flush all link events */
        cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

        /* If link already up, nothing to do */
        if (pi->link_cfg.link_ok)
                return 0;

        ret = cxgbe_set_link_status(pi, true);
        if (ret)
                return ret;

        cxgbe_dev_link_update(dev, 1);
        return 0;
}

/**
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        unsigned int work_done, budget = 32;
        struct sge *s = &adapter->sge;
        int ret;

        if (!s->fw_evtq.desc)
                return -ENOMEM;

        /* Flush all link events */
        cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

        /* If link already down, nothing to do */
        if (!pi->link_cfg.link_ok)
                return 0;

        ret = cxgbe_set_link_status(pi, false);
        if (ret)
                return ret;

        cxgbe_dev_link_update(dev, 0);
        return 0;
}

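/*
 * MTU update: the value handed to the hardware is a full frame length,
 * i.e. MTU plus Ethernet header and CRC. For example, an MTU of 1500 is
 * programmed as 1500 + 14 + 4 = 1518 bytes.
 */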
int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct rte_eth_dev_info dev_info;
        int err;
        uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        err = cxgbe_dev_info_get(eth_dev, &dev_info);
        if (err != 0)
                return err;

        /* Must accommodate at least RTE_ETHER_MIN_MTU */
        if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
                return -EINVAL;

        /* set to jumbo mode if needed */
        if (new_mtu > RTE_ETHER_MAX_LEN)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
                            -1, -1, true);
        if (!err)
                eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

        return err;
}

/*
 * Close device.
 */
void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        CXGBE_FUNC_TRACE();

        if (!(adapter->flags & FULL_INIT_DONE))
                return;

        cxgbe_down(pi);

        /*
         *  We clear queues only if both tx and rx path of the port
         *  have been disabled
         */
        t4_sge_eth_clear_queues(pi);
}

/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
        struct adapter *adapter = pi->adapter;
        int err = 0, i;

        CXGBE_FUNC_TRACE();

        /*
         * If we don't have a connection to the firmware there's nothing we
         * can do.
         */
        if (!(adapter->flags & FW_OK)) {
                err = -ENXIO;
                goto out;
        }

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = cxgbe_up(adapter);
                if (err < 0)
                        goto out;
        }

        if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
                eth_dev->data->scattered_rx = 1;
        else
                eth_dev->data->scattered_rx = 0;

        cxgbe_enable_rx_queues(pi);

        err = cxgbe_setup_rss(pi);
        if (err)
                goto out;

        for (i = 0; i < pi->n_tx_qsets; i++) {
                err = cxgbe_dev_tx_queue_start(eth_dev, i);
                if (err)
                        goto out;
        }

        for (i = 0; i < pi->n_rx_qsets; i++) {
                err = cxgbe_dev_rx_queue_start(eth_dev, i);
                if (err)
                        goto out;
        }

        err = cxgbe_link_start(pi);
        if (err)
                goto out;

out:
        return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        CXGBE_FUNC_TRACE();

        if (!(adapter->flags & FULL_INIT_DONE))
                return;

        cxgbe_down(pi);

        /*
         *  We clear queues only if both tx and rx path of the port
         *  have been disabled
         */
        t4_sge_eth_clear_queues(pi);
        eth_dev->data->scattered_rx = 0;
}

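/*
 * dev_configure runs once, before any queue setup: on the first call it
 * binds the firmware event queue (and, on PF4, the control txq), then
 * validates the requested Rx/Tx queue counts.
 */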
int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int err;

        CXGBE_FUNC_TRACE();

        if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_RSS_HASH;

        if (!(adapter->flags & FW_QUEUE_BOUND)) {
                err = cxgbe_setup_sge_fwevtq(adapter);
                if (err)
                        return err;
                adapter->flags |= FW_QUEUE_BOUND;
                if (is_pf4(adapter)) {
                        err = cxgbe_setup_sge_ctrl_txq(adapter);
                        if (err)
                                return err;
                }
        }

        err = cxgbe_cfg_queue_count(eth_dev);
        if (err)
                return err;

        return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        int ret;
        struct sge_eth_txq *txq = (struct sge_eth_txq *)
                                  (eth_dev->data->tx_queues[tx_queue_id]);

        dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

        ret = t4_sge_eth_txq_start(txq);
        if (ret == 0)
                eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return ret;
}

int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        int ret;
        struct sge_eth_txq *txq = (struct sge_eth_txq *)
                                  (eth_dev->data->tx_queues[tx_queue_id]);

        dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

        ret = t4_sge_eth_txq_stop(txq);
        if (ret == 0)
                eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return ret;
}

int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
        int err = 0;
        unsigned int temp_nb_desc;

        dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
                  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
                  socket_id, pi->first_qset);

        /*  Free up the existing queue  */
        if (eth_dev->data->tx_queues[queue_idx]) {
                cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
                eth_dev->data->tx_queues[queue_idx] = NULL;
        }

        eth_dev->data->tx_queues[queue_idx] = (void *)txq;

        /* Sanity Checking
         *
         * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
         * <= CXGBE_MAX_RING_DESC_SIZE
         */
        temp_nb_desc = nb_desc;
        if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
                dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
                         __func__, CXGBE_MIN_RING_DESC_SIZE,
                         CXGBE_DEFAULT_TX_DESC_SIZE);
                temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
        } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
                dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
                        __func__, CXGBE_MIN_RING_DESC_SIZE,
                        CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
                return -(EINVAL);
        }

        txq->q.size = temp_nb_desc;

        err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
                                   s->fw_evtq.cntxt_id, socket_id);

        dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
                  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
        return err;
}

void cxgbe_dev_tx_queue_release(void *q)
{
        struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

        if (txq) {
                struct port_info *pi = (struct port_info *)
                                       (txq->eth_dev->data->dev_private);
                struct adapter *adap = pi->adapter;

                dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n",
                          __func__, pi->port_id, txq->q.cntxt_id);

                t4_sge_eth_txq_release(adap, txq);
        }
}

int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        int ret;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;
        struct sge_rspq *q;

        dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                  __func__, pi->port_id, rx_queue_id);

        q = eth_dev->data->rx_queues[rx_queue_id];

        ret = t4_sge_eth_rxq_start(adap, q);
        if (ret == 0)
                eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return ret;
}

int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        int ret;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;
        struct sge_rspq *q;

        dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                  __func__, pi->port_id, rx_queue_id);

        q = eth_dev->data->rx_queues[rx_queue_id];
        ret = t4_sge_eth_rxq_stop(adap, q);
        if (ret == 0)
                eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return ret;
}

int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_rxconf *rx_conf __rte_unused,
                             struct rte_mempool *mp)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
        int err = 0;
        int msi_idx = 0;
        unsigned int temp_nb_desc;
        struct rte_eth_dev_info dev_info;
        unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

        dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
                  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
                  socket_id, mp);

        err = cxgbe_dev_info_get(eth_dev, &dev_info);
        if (err != 0) {
                dev_err(adap, "%s: error during getting ethernet device info",
                        __func__);
                return err;
        }

        /* Must accommodate at least RTE_ETHER_MIN_MTU */
        if ((pkt_len < dev_info.min_rx_bufsize) ||
            (pkt_len > dev_info.max_rx_pktlen)) {
                dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
                        __func__, dev_info.min_rx_bufsize,
                        dev_info.max_rx_pktlen);
                return -EINVAL;
        }

        /*  Free up the existing queue  */
        if (eth_dev->data->rx_queues[queue_idx]) {
                cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
                eth_dev->data->rx_queues[queue_idx] = NULL;
        }

        eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

        /* Sanity Checking
         *
         * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
         * <= CXGBE_MAX_RING_DESC_SIZE
         */
        temp_nb_desc = nb_desc;
        if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
                dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
                         __func__, CXGBE_MIN_RING_DESC_SIZE,
                         CXGBE_DEFAULT_RX_DESC_SIZE);
                temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
        } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
                dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
                        __func__, CXGBE_MIN_RING_DESC_SIZE,
                        CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
                return -(EINVAL);
        }

        rxq->rspq.size = temp_nb_desc;
        rxq->fl.size = temp_nb_desc;

        /* Set to jumbo mode if necessary */
        if (pkt_len > RTE_ETHER_MAX_LEN)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
                               &rxq->fl, NULL,
                               is_pf4(adapter) ?
                               t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
                               queue_idx, socket_id);

        dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
                  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
                  rxq->rspq.abs_id);
        return err;
}

void cxgbe_dev_rx_queue_release(void *q)
{
        struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
        struct sge_rspq *rq = &rxq->rspq;

        if (rq) {
                struct port_info *pi = (struct port_info *)
                                       (rq->eth_dev->data->dev_private);
                struct adapter *adap = pi->adapter;

                dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                          __func__, pi->port_id, rxq->rspq.cntxt_id);

                t4_sge_eth_rxq_release(adap, rxq);
        }
}

/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
                                struct rte_eth_stats *eth_stats)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct port_stats ps;
        unsigned int i;

        cxgbe_stats_get(pi, &ps);

        /* RX Stats */
        eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
                              ps.rx_ovflow2 + ps.rx_ovflow3 +
                              ps.rx_trunc0 + ps.rx_trunc1 +
                              ps.rx_trunc2 + ps.rx_trunc3;
        eth_stats->ierrors  = ps.rx_symbol_err + ps.rx_fcs_err +
                              ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
                              ps.rx_len_err;

        /* TX Stats */
        eth_stats->opackets = ps.tx_frames;
        eth_stats->obytes   = ps.tx_octets;
        eth_stats->oerrors  = ps.tx_error_frames;

        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq =
                        &s->ethrxq[pi->first_qset + i];

                eth_stats->q_ipackets[i] = rxq->stats.pkts;
                eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
                eth_stats->ipackets += eth_stats->q_ipackets[i];
                eth_stats->ibytes += eth_stats->q_ibytes[i];
        }

        for (i = 0; i < pi->n_tx_qsets; i++) {
                struct sge_eth_txq *txq =
                        &s->ethtxq[pi->first_qset + i];

                eth_stats->q_opackets[i] = txq->stats.pkts;
                eth_stats->q_obytes[i] = txq->stats.tx_bytes;
        }
        return 0;
}

/*
 * Reset port statistics.
 */
static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        unsigned int i;

        cxgbe_stats_reset(pi);
        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq =
                        &s->ethrxq[pi->first_qset + i];

                rxq->stats.pkts = 0;
                rxq->stats.rx_bytes = 0;
        }
        for (i = 0; i < pi->n_tx_qsets; i++) {
                struct sge_eth_txq *txq =
                        &s->ethtxq[pi->first_qset + i];

                txq->stats.pkts = 0;
                txq->stats.tx_bytes = 0;
                txq->stats.mapping_err = 0;
        }

        return 0;
}

static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                               struct rte_eth_fc_conf *fc_conf)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct link_config *lc = &pi->link_cfg;
        int rx_pause, tx_pause;

        fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
        rx_pause = lc->fc & PAUSE_RX;
        tx_pause = lc->fc & PAUSE_TX;

        if (rx_pause && tx_pause)
                fc_conf->mode = RTE_FC_FULL;
        else if (rx_pause)
                fc_conf->mode = RTE_FC_RX_PAUSE;
        else if (tx_pause)
                fc_conf->mode = RTE_FC_TX_PAUSE;
        else
                fc_conf->mode = RTE_FC_NONE;
        return 0;
}

static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                               struct rte_eth_fc_conf *fc_conf)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct link_config *lc = &pi->link_cfg;

        if (lc->pcaps & FW_PORT_CAP32_ANEG) {
                if (fc_conf->autoneg)
                        lc->requested_fc |= PAUSE_AUTONEG;
                else
                        lc->requested_fc &= ~PAUSE_AUTONEG;
        }

        if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
            (fc_conf->mode & RTE_FC_RX_PAUSE))
                lc->requested_fc |= PAUSE_RX;
        else
                lc->requested_fc &= ~PAUSE_RX;

        if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
            (fc_conf->mode & RTE_FC_TX_PAUSE))
                lc->requested_fc |= PAUSE_TX;
        else
                lc->requested_fc &= ~PAUSE_TX;

        return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
                             &pi->link_cfg);
}

const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_UNKNOWN
        };

        if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
                return ptypes;
        return NULL;
}

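/*
 * Note on the RSS key layout used below: the 40-byte key is treated as
 * ten 32-bit words, and the two helpers write/read it in reverse word
 * order with each word byte-swapped to big-endian, matching the ordering
 * the hardware expects.
 */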
/* Update RSS hash configuration
 */
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
                                     struct rte_eth_rss_conf *rss_conf)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int err;

        err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
        if (err)
                return err;

        pi->rss_hf = rss_conf->rss_hf;

        if (rss_conf->rss_key) {
                u32 key[10], mod_key[10];
                int i, j;

                memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);

                for (i = 9, j = 0; i >= 0; i--, j++)
                        mod_key[j] = cpu_to_be32(key[i]);

                t4_write_rss_key(adapter, mod_key, -1);
        }

        return 0;
}

/* Get RSS hash configuration
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                       struct rte_eth_rss_conf *rss_conf)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u64 rss_hf = 0;
        u64 flags = 0;
        int err;

        err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
                                    &flags, NULL);

        if (err)
                return err;

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
                rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
                if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
                        rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
        }

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
                rss_hf |= CXGBE_RSS_HF_IPV6_MASK;

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
                if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
                        rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
        }

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
                rss_hf |= CXGBE_RSS_HF_IPV4_MASK;

        rss_conf->rss_hf = rss_hf;

        if (rss_conf->rss_key) {
                u32 key[10], mod_key[10];
                int i, j;

                t4_read_rss_key(adapter, key);

                for (i = 9, j = 0; i >= 0; i--, j++)
                        mod_key[j] = be32_to_cpu(key[i]);

                memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
        }

        return 0;
}

static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
        RTE_SET_USED(dev);
        return EEPROMSIZE;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual.  The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
        fn *= sz;
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);
        if (phys_addr < 1024 + fn)
                return fn + phys_addr - 1024;
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024 - fn;
        return -EINVAL;
}
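/*
 * A worked example of the translation above, assuming EEPROMPFSIZE is 1K
 * (as defined in the base t4_hw.h) and fn = 2, so A = 2K:
 *   phys_addr 0x000  -> 0x000 + 31K = 31744
 *   phys_addr 0x500  -> 2048 + 1280 - 1024 = 2304
 *   phys_addr 0x1400 -> 5120 - 1024 - 2048 = 2048
 * Anything at or beyond the EEPROM size is rejected with -EINVAL.
 */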
/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = t4_seeprom_read(adap, vaddr, v);
        return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = t4_seeprom_write(adap, vaddr, v);
        return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
                            struct rte_dev_eeprom_info *e)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u32 i, err = 0;
        u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
                err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

        if (!err)
                rte_memcpy(e->data, buf + e->offset, e->length);
        rte_free(buf);
        return err;
}

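/*
 * EEPROM writes must be 32-bit aligned for t4_seeprom_write(), so the
 * routine below widens the request: e.g. offset = 6, length = 7 becomes
 * aligned_offset = 4 and aligned_len = 12, with the partial first and
 * last words handled by a read-modify-write.
 */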
static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
                            struct rte_dev_eeprom_info *eeprom)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

        if (adapter->pf > 0) {
                u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

                if (aligned_offset < start ||
                    aligned_offset + aligned_len > start + EEPROMPFSIZE)
                        return -EPERM;
        }

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
                /* RMW possibly needed for first or last words.
                 */
                buf = rte_zmalloc(NULL, aligned_len, 0);
                if (!buf)
                        return -ENOMEM;
                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = eeprom_rd_phys(adapter,
                                             aligned_offset + aligned_len - 4,
                                             (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
                           eeprom->length);
        } else {
                buf = eeprom->data;
        }

        err = t4_seeprom_wp(adapter, false);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = eeprom_wr_phys(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t4_seeprom_wp(adapter, true);
out:
        if (buf != eeprom->data)
                rte_free(buf);
        return err;
}

static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        return t4_get_regs_len(adapter) / sizeof(uint32_t);
}

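/*
 * The version word reported to ethdev packs the chip version into the
 * low bits, the chip release shifted left by 10, and a constant 1 << 16
 * as a format marker. A NULL data pointer is a length-only query.
 */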
static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
                          struct rte_dev_reg_info *regs)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
                (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
                (1 << 16);

        if (regs->data == NULL) {
                regs->length = cxgbe_get_regs_len(eth_dev);
                regs->width = sizeof(uint32_t);

                return 0;
        }

        t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));

        return 0;
}

int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
        struct port_info *pi = dev->data->dev_private;
        int ret;

        ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
        if (ret < 0) {
                dev_err(adapter, "failed to set mac addr; err = %d\n",
                        ret);
                return ret;
        }
        pi->xact_addr_filt = ret;
        return 0;
}

static const struct eth_dev_ops cxgbe_eth_dev_ops = {
        .dev_start              = cxgbe_dev_start,
        .dev_stop               = cxgbe_dev_stop,
        .dev_close              = cxgbe_dev_close,
        .promiscuous_enable     = cxgbe_dev_promiscuous_enable,
        .promiscuous_disable    = cxgbe_dev_promiscuous_disable,
        .allmulticast_enable    = cxgbe_dev_allmulticast_enable,
        .allmulticast_disable   = cxgbe_dev_allmulticast_disable,
        .dev_configure          = cxgbe_dev_configure,
        .dev_infos_get          = cxgbe_dev_info_get,
        .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
        .link_update            = cxgbe_dev_link_update,
        .dev_set_link_up        = cxgbe_dev_set_link_up,
        .dev_set_link_down      = cxgbe_dev_set_link_down,
        .mtu_set                = cxgbe_dev_mtu_set,
        .tx_queue_setup         = cxgbe_dev_tx_queue_setup,
        .tx_queue_start         = cxgbe_dev_tx_queue_start,
        .tx_queue_stop          = cxgbe_dev_tx_queue_stop,
        .tx_queue_release       = cxgbe_dev_tx_queue_release,
        .rx_queue_setup         = cxgbe_dev_rx_queue_setup,
        .rx_queue_start         = cxgbe_dev_rx_queue_start,
        .rx_queue_stop          = cxgbe_dev_rx_queue_stop,
        .rx_queue_release       = cxgbe_dev_rx_queue_release,
        .filter_ctrl            = cxgbe_dev_filter_ctrl,
        .stats_get              = cxgbe_dev_stats_get,
        .stats_reset            = cxgbe_dev_stats_reset,
        .flow_ctrl_get          = cxgbe_flow_ctrl_get,
        .flow_ctrl_set          = cxgbe_flow_ctrl_set,
        .get_eeprom_length      = cxgbe_get_eeprom_length,
        .get_eeprom             = cxgbe_get_eeprom,
        .set_eeprom             = cxgbe_set_eeprom,
        .get_reg                = cxgbe_get_regs,
        .rss_hash_update        = cxgbe_dev_rss_hash_update,
        .rss_hash_conf_get      = cxgbe_dev_rss_hash_conf_get,
        .mac_addr_set           = cxgbe_mac_addr_set,
};

/*
 * Initialize driver
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = NULL;
        char name[RTE_ETH_NAME_MAX_LEN];
        int err = 0;

        CXGBE_FUNC_TRACE();

        eth_dev->dev_ops = &cxgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* for secondary processes, we attach to ethdevs allocated by primary
         * and do minimal initialization.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                int i;

                for (i = 1; i < MAX_NPORTS; i++) {
                        struct rte_eth_dev *rest_eth_dev;
                        char namei[RTE_ETH_NAME_MAX_LEN];

                        snprintf(namei, sizeof(namei), "%s_%d",
                                 pci_dev->device.name, i);
                        rest_eth_dev = rte_eth_dev_attach_secondary(namei);
                        if (rest_eth_dev) {
                                rest_eth_dev->device = &pci_dev->device;
                                rest_eth_dev->dev_ops =
                                        eth_dev->dev_ops;
                                rest_eth_dev->rx_pkt_burst =
                                        eth_dev->rx_pkt_burst;
                                rest_eth_dev->tx_pkt_burst =
                                        eth_dev->tx_pkt_burst;
                                rte_eth_dev_probing_finish(rest_eth_dev);
                        }
                }
                return 0;
        }

        snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
        adapter = rte_zmalloc(name, sizeof(*adapter), 0);
        if (!adapter)
                return -1;

        adapter->use_unpacked_mode = 1;
        adapter->regs = (void *)pci_dev->mem_resource[0].addr;
        if (!adapter->regs) {
                dev_err(adapter, "%s: cannot map device registers\n", __func__);
                err = -ENOMEM;
                goto out_free_adapter;
        }
        adapter->pdev = pci_dev;
        adapter->eth_dev = eth_dev;
        pi->adapter = adapter;

        cxgbe_process_devargs(adapter);

        err = cxgbe_probe(adapter);
        if (err) {
                dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
                        __func__, err);
                goto out_free_adapter;
        }

        return 0;

out_free_adapter:
        rte_free(adapter);
        return err;
}

static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;

        /* Free up other ports and all resources */
        cxgbe_close(adap);
        return 0;
}

static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct port_info), eth_cxgbe_dev_init);
}

static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
}

static struct rte_pci_driver rte_cxgbe_pmd = {
        .id_table = cxgb4_pci_tbl,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_cxgbe_pci_probe,
        .remove = eth_cxgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
                              CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
                              CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
                              CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
                              CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
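/*
 * Illustrative usage of the devargs registered above, assuming the key
 * macros resolve to the lowercase names documented for this PMD
 * (keep_ovlan, tx_mode_latency, filter_mode, filter_mask) and a device
 * at the hypothetical PCI address 02:00.4:
 *
 *   testpmd -w 02:00.4,keep_ovlan=1,tx_mode_latency=1 -- -i
 *
 * The filter_mode/filter_mask values are hardware-specific bitmasks;
 * consult the cxgbe guide before setting them.
 */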
RTE_LOG_REGISTER(cxgbe_logtype, pmd.net.cxgbe, NOTICE);
RTE_LOG_REGISTER(cxgbe_mbox_logtype, pmd.net.cxgbe.mbox, NOTICE);