/* dpdk/drivers/net/virtio/virtio_pci_ethdev.c */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2010-2016 Intel Corporation
   3 */
   4
   5#include <stdint.h>
   6#include <string.h>
   7#include <stdio.h>
   8#include <errno.h>
   9#include <unistd.h>
  10
  11#include <ethdev_driver.h>
  12#include <ethdev_pci.h>
  13#include <rte_pci.h>
  14#include <rte_bus_pci.h>
  15#include <rte_errno.h>
  16
  17#include <rte_memory.h>
  18#include <rte_eal.h>
  19#include <rte_dev.h>
  20#include <rte_kvargs.h>
  21
  22#include "virtio.h"
  23#include "virtio_ethdev.h"
  24#include "virtio_pci.h"
  25#include "virtio_logs.h"
  26
  27/*
  28 * The set of PCI devices this driver supports
  29 */
static const struct rte_pci_id pci_id_virtio_map[] = {
	/* Legacy (transitional) virtio-net device. */
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
	/* Modern (virtio 1.0+) virtio-net device. */
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
	{ .vendor_id = 0, /* sentinel */ },
};
  35
  36
  37/*
  38 * Remap the PCI device again (IO port map for legacy device and
  39 * memory map for modern device), so that the secondary process
  40 * could have the PCI initiated correctly.
  41 */
  42static int
  43virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_pci_dev *dev)
  44{
  45        struct virtio_hw *hw = &dev->hw;
  46
  47        if (dev->modern) {
  48                /*
  49                 * We don't have to re-parse the PCI config space, since
  50                 * rte_pci_map_device() makes sure the mapped address
  51                 * in secondary process would equal to the one mapped in
  52                 * the primary process: error will be returned if that
  53                 * requirement is not met.
  54                 *
  55                 * That said, we could simply reuse all cap pointers
  56                 * (such as dev_cfg, common_cfg, etc.) parsed from the
  57                 * primary process, which is stored in shared memory.
  58                 */
  59                if (rte_pci_map_device(pci_dev)) {
  60                        PMD_INIT_LOG(DEBUG, "failed to map pci device!");
  61                        return -1;
  62                }
  63        } else {
  64                if (vtpci_legacy_ioport_map(hw) < 0)
  65                        return -1;
  66        }
  67
  68        return 0;
  69}
  70
  71static int
  72eth_virtio_pci_init(struct rte_eth_dev *eth_dev)
  73{
  74        struct virtio_pci_dev *dev = eth_dev->data->dev_private;
  75        struct virtio_hw *hw = &dev->hw;
  76        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
  77        int ret;
  78
  79        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
  80                hw->port_id = eth_dev->data->port_id;
  81                VTPCI_DEV(hw) = pci_dev;
  82                ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), dev);
  83                if (ret) {
  84                        PMD_INIT_LOG(ERR, "Failed to init PCI device\n");
  85                        return -1;
  86                }
  87        } else {
  88                VTPCI_DEV(hw) = pci_dev;
  89                if (dev->modern)
  90                        VIRTIO_OPS(hw) = &modern_ops;
  91                else
  92                        VIRTIO_OPS(hw) = &legacy_ops;
  93
  94                ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), dev);
  95                if (ret < 0) {
  96                        PMD_INIT_LOG(ERR, "Failed to remap PCI device\n");
  97                        return -1;
  98                }
  99        }
 100
 101        ret = eth_virtio_dev_init(eth_dev);
 102        if (ret < 0) {
 103                PMD_INIT_LOG(ERR, "Failed to init virtio device\n");
 104                goto err_unmap;
 105        }
 106
 107        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
 108                eth_dev->data->port_id, pci_dev->id.vendor_id,
 109                pci_dev->id.device_id);
 110
 111        return 0;
 112
 113err_unmap:
 114        rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
 115        if (!dev->modern)
 116                vtpci_legacy_ioport_unmap(hw);
 117
 118        return ret;
 119}
 120
 121static int
 122eth_virtio_pci_uninit(struct rte_eth_dev *eth_dev)
 123{
 124        int ret;
 125        PMD_INIT_FUNC_TRACE();
 126
 127        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
 128                return 0;
 129
 130        ret = virtio_dev_stop(eth_dev);
 131        virtio_dev_close(eth_dev);
 132
 133        PMD_INIT_LOG(DEBUG, "dev_uninit completed");
 134
 135        return ret;
 136}
 137
 138static int vdpa_check_handler(__rte_unused const char *key,
 139                const char *value, void *ret_val)
 140{
 141        if (strcmp(value, "1") == 0)
 142                *(int *)ret_val = 1;
 143        else
 144                *(int *)ret_val = 0;
 145
 146        return 0;
 147}
 148
 149#define VIRTIO_ARG_VDPA       "vdpa"
 150
 151static int
 152virtio_pci_devargs_parse(struct rte_devargs *devargs, int *vdpa)
 153{
 154        struct rte_kvargs *kvlist;
 155        int ret = 0;
 156
 157        if (devargs == NULL)
 158                return 0;
 159
 160        kvlist = rte_kvargs_parse(devargs->args, NULL);
 161        if (kvlist == NULL) {
 162                PMD_INIT_LOG(ERR, "error when parsing param");
 163                return 0;
 164        }
 165
 166        if (rte_kvargs_count(kvlist, VIRTIO_ARG_VDPA) == 1) {
 167                /* vdpa mode selected when there's a key-value pair:
 168                 * vdpa=1
 169                 */
 170                ret = rte_kvargs_process(kvlist, VIRTIO_ARG_VDPA,
 171                                vdpa_check_handler, vdpa);
 172                if (ret < 0)
 173                        PMD_INIT_LOG(ERR, "Failed to parse %s", VIRTIO_ARG_VDPA);
 174        }
 175
 176        rte_kvargs_free(kvlist);
 177
 178        return ret;
 179}
 180
 181static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 182        struct rte_pci_device *pci_dev)
 183{
 184        int vdpa = 0;
 185        int ret = 0;
 186
 187        ret = virtio_pci_devargs_parse(pci_dev->device.devargs, &vdpa);
 188        if (ret < 0) {
 189                PMD_INIT_LOG(ERR, "devargs parsing is failed");
 190                return ret;
 191        }
 192        /* virtio pmd skips probe if device needs to work in vdpa mode */
 193        if (vdpa == 1)
 194                return 1;
 195
 196        return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_pci_dev),
 197                eth_virtio_pci_init);
 198}
 199
 200static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
 201{
 202        int ret;
 203
 204        ret = rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_pci_uninit);
 205        /* Port has already been released by close. */
 206        if (ret == -ENODEV)
 207                ret = 0;
 208        return ret;
 209}
 210
/* PCI driver descriptor registered with the PCI bus below. */
static struct rte_pci_driver rte_virtio_net_pci_pmd = {
	.driver = {
		.name = "net_virtio",
	},
	.id_table = pci_id_virtio_map,
	.drv_flags = 0,
	.probe = eth_virtio_pci_probe,
	.remove = eth_virtio_pci_remove,
};
 220
/* Constructor: runs at load time, before rte_eal_init(). */
RTE_INIT(rte_virtio_net_pci_pmd_init)
{
	/*
	 * Request I/O port access first — presumably needed by the
	 * legacy ioport path; NOTE(review): return value is ignored,
	 * so failure is silently tolerated here.
	 */
	rte_eal_iopl_init();
	rte_pci_register(&rte_virtio_net_pci_pmd);
}
 226
/* Export the supported PCI ID table and kernel-module dependencies. */
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
 230