dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#include <ethdev_driver.h>
#include <ethdev_pci.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct rte_pci_id cxgb4vf_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x8

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
                { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { .vendor_id = 0, } \
        }

/*
 * ... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"
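
/*
 * Note: including the shared ID table header with the macros above defined
 * expands into the cxgb4vf_pci_tbl[] array, one rte_pci_id entry per
 * supported Chelsio device ID, terminated by the zero vendor_id sentinel.
 * CH_PCI_DEVICE_ID_FUNCTION is 0x8 here so that (as the header is expected
 * to fold the PCI function number into each device ID) only VF device IDs
 * are generated; the PF driver uses function 0x4.
 */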

/*
 * Get port statistics.
 */
static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev,
                                 struct rte_eth_stats *eth_stats)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct port_stats ps;
        unsigned int i;

        cxgbevf_stats_get(pi, &ps);

        /* RX Stats */
        eth_stats->ierrors  = ps.rx_len_err;

        /* TX Stats */
        eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames +
                              ps.tx_ucast_frames;
        eth_stats->obytes = ps.tx_octets;
        eth_stats->oerrors  = ps.tx_drop;

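        /*
         * Total RX packet/byte counts are accumulated from the per-queue
         * software counters maintained by the RX path; the per-queue
         * entries assume the queue count does not exceed the size of the
         * ethdev per-queue stats arrays (RTE_ETHDEV_QUEUE_STAT_CNTRS).
         */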
        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq =
                        &s->ethrxq[pi->first_rxqset + i];

                eth_stats->q_ipackets[i] = rxq->stats.pkts;
                eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
                eth_stats->ipackets += eth_stats->q_ipackets[i];
                eth_stats->ibytes += eth_stats->q_ibytes[i];
        }

        for (i = 0; i < pi->n_tx_qsets; i++) {
                struct sge_eth_txq *txq =
                        &s->ethtxq[pi->first_txqset + i];

                eth_stats->q_opackets[i] = txq->stats.pkts;
                eth_stats->q_obytes[i] = txq->stats.tx_bytes;
        }
        return 0;
}

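/*
 * Ethdev callback table. All handlers except .stats_get come from
 * cxgbe_pfvf.h and are shared with the PF driver; .stats_get uses the
 * VF-specific implementation above.
 */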
static const struct eth_dev_ops cxgbevf_eth_dev_ops = {
        .dev_start              = cxgbe_dev_start,
        .dev_stop               = cxgbe_dev_stop,
        .dev_close              = cxgbe_dev_close,
        .promiscuous_enable     = cxgbe_dev_promiscuous_enable,
        .promiscuous_disable    = cxgbe_dev_promiscuous_disable,
        .allmulticast_enable    = cxgbe_dev_allmulticast_enable,
        .allmulticast_disable   = cxgbe_dev_allmulticast_disable,
        .dev_configure          = cxgbe_dev_configure,
        .dev_infos_get          = cxgbe_dev_info_get,
        .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
        .link_update            = cxgbe_dev_link_update,
        .dev_set_link_up        = cxgbe_dev_set_link_up,
        .dev_set_link_down      = cxgbe_dev_set_link_down,
        .mtu_set                = cxgbe_dev_mtu_set,
        .tx_queue_setup         = cxgbe_dev_tx_queue_setup,
        .tx_queue_start         = cxgbe_dev_tx_queue_start,
        .tx_queue_stop          = cxgbe_dev_tx_queue_stop,
        .tx_queue_release       = cxgbe_dev_tx_queue_release,
        .rx_queue_setup         = cxgbe_dev_rx_queue_setup,
        .rx_queue_start         = cxgbe_dev_rx_queue_start,
        .rx_queue_stop          = cxgbe_dev_rx_queue_stop,
        .rx_queue_release       = cxgbe_dev_rx_queue_release,
        .stats_get              = cxgbevf_dev_stats_get,
        .mac_addr_set           = cxgbe_mac_addr_set,
};

/*
 * Initialize the driver.
 * It returns 0 on success.
 */
static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct rte_pci_device *pci_dev;
        char name[RTE_ETH_NAME_MAX_LEN];
        struct adapter *adapter = NULL;
        int err = 0;

        CXGBE_FUNC_TRACE();

        eth_dev->dev_ops = &cxgbevf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* For secondary processes, we attach to ethdevs allocated by the
         * primary process and do only minimal initialization.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                int i;

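                /*
                 * Port 0 is attached by the generic ethdev PCI probe path;
                 * the remaining ports created by the primary process are
                 * named "<PCI device name>_<port index>", so look them up
                 * by that convention and wire up the ops and fast-path
                 * function pointers.
                 */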
                for (i = 1; i < MAX_NPORTS; i++) {
                        struct rte_eth_dev *rest_eth_dev;
                        char namei[RTE_ETH_NAME_MAX_LEN];

                        snprintf(namei, sizeof(namei), "%s_%d",
                                 pci_dev->device.name, i);
                        rest_eth_dev = rte_eth_dev_attach_secondary(namei);
                        if (rest_eth_dev) {
                                rest_eth_dev->device = &pci_dev->device;
                                rest_eth_dev->dev_ops =
                                        eth_dev->dev_ops;
                                rest_eth_dev->rx_pkt_burst =
                                        eth_dev->rx_pkt_burst;
                                rest_eth_dev->tx_pkt_burst =
                                        eth_dev->tx_pkt_burst;
                                rte_eth_dev_probing_finish(rest_eth_dev);
                        }
                }
                return 0;
        }

        snprintf(name, sizeof(name), "cxgbevfadapter%d",
                 eth_dev->data->port_id);
        adapter = rte_zmalloc(name, sizeof(*adapter), 0);
        if (!adapter)
                return -1;

        adapter->use_unpacked_mode = 1;
        adapter->regs = (void *)pci_dev->mem_resource[0].addr;
        if (!adapter->regs) {
                dev_err(adapter, "%s: cannot map device registers\n", __func__);
                err = -ENOMEM;
                goto out_free_adapter;
        }
        adapter->pdev = pci_dev;
        adapter->eth_dev = eth_dev;
        pi->adapter = adapter;

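        /*
         * Parse the devargs advertised via RTE_PMD_REGISTER_PARAM_STRING()
         * at the bottom of this file (keep_ovlan, tx_mode_latency,
         * force_link_up) before probing the hardware.
         */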
        cxgbe_process_devargs(adapter);

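        /*
         * cxgbevf_probe() is expected to bring up the VF firmware interface
         * and finish adapter and port initialization; on failure, only the
         * adapter structure allocated above needs to be freed here.
         */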
        err = cxgbevf_probe(adapter);
        if (err) {
                dev_err(adapter, "%s: cxgbevf probe failed with err %d\n",
                        __func__, err);
                goto out_free_adapter;
        }

        return 0;

out_free_adapter:
        rte_free(adapter);
        return err;
}

static int eth_cxgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        uint16_t port_id;
        int err = 0;

        /* Free up other ports and all resources */
        RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
                err |= rte_eth_dev_close(port_id);

        return err == 0 ? 0 : -EIO;
}

static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                                 struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info),
                                             eth_cxgbevf_dev_init);
}

static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbevf_dev_uninit);
}

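/*
 * PCI driver glue: RTE_PCI_DRV_NEED_MAPPING asks the PCI bus to map the
 * device BARs before probe, which is what makes mem_resource[0].addr
 * usable as the register window in eth_cxgbevf_dev_init().
 */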
static struct rte_pci_driver rte_cxgbevf_pmd = {
        .id_table = cxgb4vf_pci_tbl,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_cxgbevf_pci_probe,
        .remove = eth_cxgbevf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci");
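/*
 * Runtime devargs accepted by this PMD; all take a 0/1 value. As the macro
 * prefixes suggest, the CMN_ options are shared with the PF driver, while
 * force_link_up applies to the VF only.
 */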
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbevf,
                              CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
                              CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
                              CXGBE_DEVARG_VF_FORCE_LINK_UP "=<0|1> ");