linux/drivers/vdpa/ifcvf/ifcvf_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR   "Intel Corporation"
#define IFCVF_DRIVER_NAME       "ifcvf"

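/*
 * Both irq handlers simply relay the interrupt to the callback that the
 * vDPA bus driver registered, if any.
 */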
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;

        if (vf->config_cb.callback)
                return vf->config_cb.callback(vf->config_cb.private);

        return IRQ_HANDLED;
}

static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
        struct vring_info *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

static void ifcvf_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

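/*
 * Free the per-vq irqs for the first @queues queues, then the config irq
 * and the MSI-X vectors. @queues lets the request path unwind only the
 * vq irqs it has successfully requested so far.
 */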
static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i;

        for (i = 0; i < queues; i++) {
                devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
                vf->vring[i].irq = -EINVAL;
        }

        devm_free_irq(&pdev->dev, vf->config_irq, vf);
        ifcvf_free_irq_vectors(pdev);
}

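/*
 * MSI-X layout: vector 0 carries the config-change interrupt, vq i uses
 * vector i + IFCVF_MSI_QUEUE_OFF. Exactly one vector per vq plus the
 * config vector are requested (min == max), so allocation fails rather
 * than falling back to shared vectors.
 */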
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int vector, i, ret, irq;
        u16 max_intr;

        /* all queues and config interrupt */
        max_intr = vf->nr_vring + 1;

        ret = pci_alloc_irq_vectors(pdev, max_intr,
                                    max_intr, PCI_IRQ_MSIX);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }

        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
                 pci_name(pdev));
        vector = 0;
        vf->config_irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
                return ret;
        }

        for (i = 0; i < vf->nr_vring; i++) {
                snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
                         pci_name(pdev), i);
                vector = i + IFCVF_MSI_QUEUE_OFF;
                irq = pci_irq_vector(pdev, vector);
                ret = devm_request_irq(&pdev->dev, irq,
                                       ifcvf_intr_handler, 0,
                                       vf->vring[i].msix_name,
                                       &vf->vring[i]);
                if (ret) {
                        IFCVF_ERR(pdev,
                                  "Failed to request irq for vq %d\n", i);
                        ifcvf_free_irq(adapter, i);
                        return ret;
                }

                vf->vring[i].irq = irq;
        }

        return 0;
}

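/*
 * Program the rings and start the device; on failure, latch
 * VIRTIO_CONFIG_S_FAILED into the status register so the driver side
 * can observe the error.
 */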
static int ifcvf_start_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        u8 status;
        int ret;

        ret = ifcvf_start_hw(vf);
        if (ret < 0) {
                status = ifcvf_get_status(vf);
                status |= VIRTIO_CONFIG_S_FAILED;
                ifcvf_set_status(vf, status);
        }

        return ret;
}

static int ifcvf_stop_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        int i;

        for (i = 0; i < vf->nr_vring; i++)
                vf->vring[i].cb.callback = NULL;

        ifcvf_stop_hw(vf);

        return 0;
}

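/* Clear all per-vq software state, then reset the device itself. */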
static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
        int i;

        for (i = 0; i < vf->nr_vring; i++) {
                vf->vring[i].last_avail_idx = 0;
                vf->vring[i].desc = 0;
                vf->vring[i].avail = 0;
                vf->vring[i].used = 0;
                vf->vring[i].ready = 0;
                vf->vring[i].cb.callback = NULL;
                vf->vring[i].cb.private = NULL;
        }

        ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
        return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

        return &adapter->vf;
}

static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;
        u32 type = vf->dev_type;
        u64 features;

        if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK) {
                features = ifcvf_get_features(vf);
        } else {
                features = 0;
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
        }

        return features;
}

 189
 190static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
 191{
 192        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 193        int ret;
 194
 195        ret = ifcvf_verify_min_features(vf, features);
 196        if (ret)
 197                return ret;
 198
 199        vf->req_features = features;
 200
 201        return 0;
 202}
 203
 204static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
 205{
 206        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 207
 208        return ifcvf_get_status(vf);
 209}
 210
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;
        int ret;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = vdpa_to_adapter(vdpa_dev);
        status_old = ifcvf_get_status(vf);

        if (status_old == status)
                return;

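        /*
         * On the first transition to DRIVER_OK, request the irqs and
         * start the datapath before the new status is written to the
         * device.
         */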
        if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ret = ifcvf_request_irq(adapter);
                if (ret) {
                        status = ifcvf_get_status(vf);
                        status |= VIRTIO_CONFIG_S_FAILED;
                        ifcvf_set_status(vf, status);
                        return;
                }

                if (ifcvf_start_datapath(adapter) < 0)
                        IFCVF_ERR(adapter->pdev,
                                  "Failed to set ifcvf vdpa status %u\n",
                                  status);
        }

        ifcvf_set_status(vf, status);
}

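/*
 * A reset tears down the datapath and irqs only if DRIVER_OK was set,
 * then clears all vring state and resets the device.
 */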
static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = vdpa_to_adapter(vdpa_dev);
        status_old = ifcvf_get_status(vf);

        if (status_old == 0)
                return 0;

        if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
                ifcvf_stop_datapath(adapter);
                ifcvf_free_irq(adapter, vf->nr_vring);
        }

        ifcvf_reset_vring(adapter);

        return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        state->split.avail_index = ifcvf_get_vq_state(vf, qid);
        return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   const struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                                    u16 qid, bool ready)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                                  u32 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                                     u64 desc_area, u64 driver_area,
                                     u64 device_area)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].desc = desc_area;
        vf->vring[qid].avail = driver_area;
        vf->vring[qid].used = device_area;

        return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;

        return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;
        size_t size;

        switch (vf->dev_type) {
        case VIRTIO_ID_NET:
                size = sizeof(struct virtio_net_config);
                break;
        case VIRTIO_ID_BLOCK:
                size = sizeof(struct virtio_blk_config);
                break;
        default:
                size = 0;
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
        }

        return size;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > ifcvf_vdpa_get_config_size(vdpa_dev));
        ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset, const void *buf,
                                  unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > ifcvf_vdpa_get_config_size(vdpa_dev));
        ifcvf_write_net_config(vf, offset, buf, len);
}

 408
 409static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
 410                                     struct vdpa_callback *cb)
 411{
 412        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 413
 414        vf->config_cb.callback = cb->callback;
 415        vf->config_cb.private = cb->private;
 416}
 417
 418static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
 419                                 u16 qid)
 420{
 421        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 422
 423        return vf->vring[qid].irq;
 424}
 425
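/*
 * Expose the vq's notify (doorbell) address so it can be mapped directly
 * into userspace; if the device reports a zero notify_off_multiplier,
 * fall back to a one-page area.
 */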
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
                                                               u16 idx)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct vdpa_notification_area area;

        area.addr = vf->vring[idx].notify_pa;
        if (!vf->notify_off_multiplier)
                area.size = PAGE_SIZE;
        else
                area.size = vf->notify_off_multiplier;

        return area;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so the
 * set_map()/dma_map()/dma_unmap() ops are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_features   = ifcvf_vdpa_get_features,
        .set_features   = ifcvf_vdpa_set_features,
        .get_status     = ifcvf_vdpa_get_status,
        .set_status     = ifcvf_vdpa_set_status,
        .reset          = ifcvf_vdpa_reset,
        .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
        .get_vq_state   = ifcvf_vdpa_get_vq_state,
        .set_vq_state   = ifcvf_vdpa_set_vq_state,
        .set_vq_cb      = ifcvf_vdpa_set_vq_cb,
        .set_vq_ready   = ifcvf_vdpa_set_vq_ready,
        .get_vq_ready   = ifcvf_vdpa_get_vq_ready,
        .set_vq_num     = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
        .get_vq_irq     = ifcvf_vdpa_get_vq_irq,
        .kick_vq        = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id  = ifcvf_vdpa_get_device_id,
        .get_vendor_id  = ifcvf_vdpa_get_vendor_id,
        .get_vq_align   = ifcvf_vdpa_get_vq_align,
        .get_config_size        = ifcvf_vdpa_get_config_size,
        .get_config     = ifcvf_vdpa_get_config,
        .set_config     = ifcvf_vdpa_set_config,
        .set_config_cb  = ifcvf_vdpa_set_config_cb,
        .get_vq_notification = ifcvf_get_vq_notification,
};

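/*
 * The id_table advertised through the management device restricts which
 * virtio device type a vDPA device may be created as.
 */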
static struct virtio_device_id id_table_net[] = {
        {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
        {0},
};

static struct virtio_device_id id_table_blk[] = {
        {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
        {0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
        u32 dev_type;

        /* This driver drives both modern virtio devices and transitional
         * devices in modern mode.
         * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, which
         * legacy devices and transitional devices in legacy mode cannot
         * offer, so this driver will not drive devices with a legacy
         * interface.
         */

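        /*
         * Per the virtio spec, transitional devices use PCI device IDs
         * 0x1000..0x103f and encode the virtio device type in the
         * subsystem device ID, while modern devices use 0x1040 plus the
         * virtio device type.
         */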
        if (pdev->device < 0x1040)
                dev_type = pdev->subsystem_device;
        else
                dev_type = pdev->device - 0x1040;

        return dev_type;
}

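/*
 * Each management device wraps exactly one VF, so at most one vDPA
 * device can be added; a second add is refused until the first one is
 * deleted.
 */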
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct ifcvf_adapter *adapter;
        struct pci_dev *pdev;
        struct ifcvf_hw *vf;
        struct device *dev;
        int ret, i;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        if (ifcvf_mgmt_dev->adapter)
                return -EOPNOTSUPP;

        pdev = ifcvf_mgmt_dev->pdev;
        dev = &pdev->dev;
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops, name, false);
        if (IS_ERR(adapter)) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return PTR_ERR(adapter);
        }

        ifcvf_mgmt_dev->adapter = adapter;
        pci_set_drvdata(pdev, ifcvf_mgmt_dev);

        vf = &adapter->vf;
        vf->dev_type = get_dev_type(pdev);
        vf->base = pcim_iomap_table(pdev);

        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;

        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
                goto err;
        }

        for (i = 0; i < vf->nr_vring; i++)
                vf->vring[i].irq = -EINVAL;

        vf->hw_features = ifcvf_get_hw_features(vf);

        adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
        ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to register to vDPA bus");
                goto err;
        }

        return 0;

err:
        ifcvf_mgmt_dev->adapter = NULL;
        put_device(&adapter->vdpa.dev);
        return ret;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        _vdpa_unregister_device(dev);
        ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
        .dev_add = ifcvf_vdpa_dev_add,
        .dev_del = ifcvf_vdpa_dev_del,
};

static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct device *dev = &pdev->dev;
        u32 dev_type;
        int ret;

        ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
        if (!ifcvf_mgmt_dev) {
                IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
                return -ENOMEM;
        }

        dev_type = get_dev_type(pdev);
        switch (dev_type) {
        case VIRTIO_ID_NET:
                ifcvf_mgmt_dev->mdev.id_table = id_table_net;
                break;
        case VIRTIO_ID_BLOCK:
                ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
                break;
        default:
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
                ret = -EOPNOTSUPP;
                goto err;
        }

        ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
        ifcvf_mgmt_dev->mdev.device = dev;
        ifcvf_mgmt_dev->pdev = pdev;

        ret = pcim_enable_device(pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to enable device\n");
                goto err;
        }

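        /*
         * Map the BARs that back the VF's virtio-pci capability
         * structures; ifcvf_init_hw() will parse the capability list
         * and locate the register blocks inside these regions.
         */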
        ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
                                 IFCVF_DRIVER_NAME);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MMIO region\n");
                goto err;
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev, "No usable DMA configuration\n");
                goto err;
        }

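        /*
         * Register a devres action so the MSI-X vectors are released on
         * driver detach even if no vDPA device was ever started.
         */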
        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to add devres action to free irq vectors\n");
                goto err;
        }

        pci_set_master(pdev);

        ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to initialize the management interfaces\n");
                goto err;
        }

        return 0;

err:
        kfree(ifcvf_mgmt_dev);
        return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = pci_get_drvdata(pdev);
        vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
        kfree(ifcvf_mgmt_dev);
}

static const struct pci_device_id ifcvf_pci_ids[] = {
        /* N3000 network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         N3000_DEVICE_ID,
                         PCI_VENDOR_ID_INTEL,
                         N3000_SUBSYS_DEVICE_ID) },
        /* C5000X-PL network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_NET,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_NET) },
        /* C5000X-PL block device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_BLOCK,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_BLOCK) },

        { 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
        .name     = IFCVF_DRIVER_NAME,
        .id_table = ifcvf_pci_ids,
        .probe    = ifcvf_probe,
        .remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");