linux/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_netvf_main.c
 * Netronome virtual function network device driver: Main entry point
 * Author: Jason McMullan <jason.mcmullan@netronome.com>
 *         Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/etherdevice.h>

#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"

/**
 * struct nfp_net_vf - NFP VF-specific device structure
 * @nn:         NFP Net structure for this device
 * @irq_entries: Pre-allocated array of MSI-X entries
 * @q_bar:      Pointer to mapped QC memory (NULL if TX/RX mapped directly)
 * @ddir:       Per-device debugfs directory
 */
struct nfp_net_vf {
        struct nfp_net *nn;

        struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
                                      NFP_NET_MAX_TX_RINGS];
        u8 __iomem *q_bar;

        struct dentry *ddir;
};

static const char nfp_net_driver_name[] = "nfp_netvf";

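/* Match the NFP6000 VF PCI device ID.  The table entry also requires
 * Netronome as the subsystem vendor, while the subsystem device ID is
 * left as a wildcard.
 */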
#define PCI_DEVICE_NFP6000VF            0x6003
static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
        { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
          PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
          PCI_ANY_ID, 0,
        },
        { 0, } /* Required last entry. */
};
MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids);

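/* Read the vNIC's MAC address from the control BAR.  The address is
 * stored big-endian across two 32-bit words, so the first four octets
 * come from a 32-bit read at offset 0 and the last two from a 16-bit
 * read at offset 6.  If the firmware does not provide a valid address,
 * fall back to a random one.
 */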
static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
{
        u8 mac_addr[ETH_ALEN];

        put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]);
        put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);

        if (!is_valid_ether_addr(mac_addr)) {
                eth_hw_addr_random(nn->dp.netdev);
                return;
        }

        ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
        ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}

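/* Probe sequence: enable the PCI device, map the control BAR, validate
 * the firmware ABI version, size and map the TX/RX queue BARs, read the
 * MAC address, allocate MSI-X vectors, then initialise and register the
 * netdev.  Everything is undone in reverse order on error.
 */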
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
                               const struct pci_device_id *pci_id)
{
        struct nfp_net_fw_version fw_ver;
        int max_tx_rings, max_rx_rings;
        u32 tx_bar_off, rx_bar_off;
        u32 tx_bar_sz, rx_bar_sz;
        int tx_bar_no, rx_bar_no;
        struct nfp_net_vf *vf;
        unsigned int num_irqs;
        u8 __iomem *ctrl_bar;
        struct nfp_net *nn;
        u32 startq;
        int stride;
        int err;

        vf = kzalloc(sizeof(*vf), GFP_KERNEL);
        if (!vf)
                return -ENOMEM;
        pci_set_drvdata(pdev, vf);

        err = pci_enable_device_mem(pdev);
        if (err)
                goto err_free_vf;

        err = pci_request_regions(pdev, nfp_net_driver_name);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate device memory.\n");
                goto err_pci_disable;
        }

        pci_set_master(pdev);

        err = dma_set_mask_and_coherent(&pdev->dev,
                                        DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
        if (err)
                goto err_pci_regions;

        /* Map the Control BAR.
         *
         * Irrespective of the advertised BAR size we only map the
         * first NFP_NET_CFG_BAR_SZ of the BAR.  This keeps the code
         * identical for PF and VF drivers.
         */
        ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
                           NFP_NET_CFG_BAR_SZ);
        if (!ctrl_bar) {
                dev_err(&pdev->dev,
                        "Failed to map resource %d\n", NFP_NET_CTRL_BAR);
                err = -EIO;
                goto err_pci_regions;
        }

        nfp_net_get_fw_version(&fw_ver, ctrl_bar);
        if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
                dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
                        fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
                err = -EINVAL;
                goto err_ctrl_unmap;
        }

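        /* Each ring consumes stride * NFP_QCP_QUEUE_ADDR_SZ bytes of
         * queue controller address space (see the BAR size calculation
         * below).  The legacy 0.0.0.1 ABI spaces queues at stride 2 with
         * TX and RX rings in separate BARs; ABI majors 1 through 5 use
         * stride 4 with both ring types sharing a single BAR.
         */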
        /* Determine stride */
        if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
                stride = 2;
                tx_bar_no = NFP_NET_Q0_BAR;
                rx_bar_no = NFP_NET_Q1_BAR;
                dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
        } else {
                switch (fw_ver.major) {
                case 1 ... 5:
                        stride = 4;
                        tx_bar_no = NFP_NET_Q0_BAR;
                        rx_bar_no = tx_bar_no;
                        break;
                default:
                        dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
                                fw_ver.resv, fw_ver.class,
                                fw_ver.major, fw_ver.minor);
                        err = -EINVAL;
                        goto err_ctrl_unmap;
                }
        }

        /* Find out how many rings are supported */
        max_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
        max_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

        tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * stride;
        rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * stride;

        /* Sanity checks */
        if (tx_bar_sz > pci_resource_len(pdev, tx_bar_no)) {
                dev_err(&pdev->dev,
                        "TX BAR too small for number of TX rings. Adjusting\n");
                tx_bar_sz = pci_resource_len(pdev, tx_bar_no);
                max_tx_rings = (tx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
        }
        if (rx_bar_sz > pci_resource_len(pdev, rx_bar_no)) {
                dev_err(&pdev->dev,
                        "RX BAR too small for number of RX rings. Adjusting\n");
                rx_bar_sz = pci_resource_len(pdev, rx_bar_no);
                max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
        }

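        /* NFP_PCIE_QUEUE() translates the absolute queue numbers
         * advertised by the firmware into byte offsets within the queue
         * controller BAR.
         */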
        startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
        tx_bar_off = NFP_PCIE_QUEUE(startq);
        startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
        rx_bar_off = NFP_PCIE_QUEUE(startq);

        /* Allocate and initialise the netdev */
        nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
        if (IS_ERR(nn)) {
                err = PTR_ERR(nn);
                goto err_ctrl_unmap;
        }
        vf->nn = nn;

        nn->fw_ver = fw_ver;
        nn->dp.is_vf = 1;
        nn->stride_tx = stride;
        nn->stride_rx = stride;

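        /* When TX and RX rings share one PCI BAR their ranges may abut
         * or overlap, so map the union of the two ranges once and point
         * tx_bar/rx_bar into it.  Illustrative numbers only: with
         * tx_bar_off = 0x80000, rx_bar_off = 0x80800 and both sizes
         * 0x1000, this maps [0x80000, 0x81800) and yields
         * tx_bar = q_bar + 0x0 and rx_bar = q_bar + 0x800.
         */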
        if (rx_bar_no == tx_bar_no) {
                u32 bar_off, bar_sz;
                resource_size_t map_addr;

                /* Make a single overlapping BAR mapping */
                if (tx_bar_off < rx_bar_off)
                        bar_off = tx_bar_off;
                else
                        bar_off = rx_bar_off;

                if ((tx_bar_off + tx_bar_sz) > (rx_bar_off + rx_bar_sz))
                        bar_sz = (tx_bar_off + tx_bar_sz) - bar_off;
                else
                        bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;

                map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
                vf->q_bar = ioremap(map_addr, bar_sz);
                if (!vf->q_bar) {
                        nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
                        err = -EIO;
                        goto err_netdev_free;
                }

                /* TX queues */
                nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off);
                /* RX queues */
                nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off);
        } else {
                resource_size_t map_addr;

                /* TX queues */
                map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
                nn->tx_bar = ioremap(map_addr, tx_bar_sz);
                if (!nn->tx_bar) {
                        nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
                        err = -EIO;
                        goto err_netdev_free;
                }

                /* RX queues */
                map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
                nn->rx_bar = ioremap(map_addr, rx_bar_sz);
                if (!nn->rx_bar) {
                        nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
                        err = -EIO;
                        goto err_unmap_tx;
                }
        }

        nfp_netvf_get_mac_addr(nn);

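        /* Request one MSI-X vector per ring vector plus the non-queue
         * vectors; nfp_net_irqs_alloc() may return fewer (but at least
         * NFP_NET_MIN_VNIC_IRQS), or zero on failure.
         */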
        num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
                                      NFP_NET_MIN_VNIC_IRQS,
                                      NFP_NET_NON_Q_VECTORS +
                                      nn->dp.num_r_vecs);
        if (!num_irqs) {
                nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
                err = -EIO;
                goto err_unmap_rx;
        }
        nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);

        err = nfp_net_init(nn);
        if (err)
                goto err_irqs_disable;

        nfp_net_info(nn);
        vf->ddir = nfp_net_debugfs_device_add(pdev);
        nfp_net_debugfs_vnic_add(nn, vf->ddir);

        return 0;

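/* Error unwind, in reverse order of acquisition.  A set vf->q_bar means
 * the single shared queue mapping was used, in which case the separate
 * rx_bar/tx_bar mappings do not exist, and vice versa.
 */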
err_irqs_disable:
        nfp_net_irqs_disable(pdev);
err_unmap_rx:
        if (!vf->q_bar)
                iounmap(nn->rx_bar);
err_unmap_tx:
        if (!vf->q_bar)
                iounmap(nn->tx_bar);
        else
                iounmap(vf->q_bar);
err_netdev_free:
        nfp_net_free(nn);
err_ctrl_unmap:
        iounmap(ctrl_bar);
err_pci_regions:
        pci_release_regions(pdev);
err_pci_disable:
        pci_disable_device(pdev);
err_free_vf:
        pci_set_drvdata(pdev, NULL);
        kfree(vf);
        return err;
}

static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
        struct nfp_net_vf *vf;
        struct nfp_net *nn;

        vf = pci_get_drvdata(pdev);
        if (!vf)
                return;

        nn = vf->nn;

        /* Note, the order is slightly different from above as we need
         * to keep the nn pointer around till we have freed everything.
         */
        nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
        nfp_net_debugfs_dir_clean(&vf->ddir);

        nfp_net_clean(nn);

        nfp_net_irqs_disable(pdev);

        if (!vf->q_bar) {
                iounmap(nn->rx_bar);
                iounmap(nn->tx_bar);
        } else {
                iounmap(vf->q_bar);
        }
        iounmap(nn->dp.ctrl_bar);

        nfp_net_free(nn);

        pci_release_regions(pdev);
        pci_disable_device(pdev);

        pci_set_drvdata(pdev, NULL);
        kfree(vf);
}

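/* The shutdown callback reuses the remove path so the device is fully
 * quiesced (rings stopped, IRQs freed) before a reboot or kexec.
 */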
struct pci_driver nfp_netvf_pci_driver = {
        .name        = nfp_net_driver_name,
        .id_table    = nfp_netvf_pci_device_ids,
        .probe       = nfp_netvf_pci_probe,
        .remove      = nfp_netvf_pci_remove,
        .shutdown    = nfp_netvf_pci_remove,
};