linux/drivers/scsi/snic/vnic_dev.c
/*
 * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"

#define VNIC_DVCMD_TMO  10000   /* Devcmd Timeout value */
#define VNIC_NOTIFY_INTR_MASK 0x0000ffff00000000ULL

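/*
 * State for the devcmd2 (work-queue based) firmware command path.  Commands
 * are staged in cmd_ring and posted by bumping wq_ctrl->posted_index;
 * completions are read from result, with color toggling on every wrap of
 * the results ring so new entries can be told apart from stale ones
 * (see _svnic_dev_cmd2()).
 */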
struct devcmd2_controller {
        struct vnic_wq_ctrl __iomem *wq_ctrl;
        struct vnic_dev_ring results_ring;
        struct vnic_wq wq;
        struct vnic_devcmd2 *cmd_ring;
        struct devcmd2_result *result;
        u16 next_result;
        u16 result_size;
        int color;
};

struct vnic_res {
        void __iomem *vaddr;
        unsigned int count;
};

struct vnic_dev {
        void *priv;
        struct pci_dev *pdev;
        struct vnic_res res[RES_TYPE_MAX];
        enum vnic_dev_intr_mode intr_mode;
        struct vnic_devcmd __iomem *devcmd;
        struct vnic_devcmd_notify *notify;
        struct vnic_devcmd_notify notify_copy;
        dma_addr_t notify_pa;
        u32 *linkstatus;
        dma_addr_t linkstatus_pa;
        struct vnic_stats *stats;
        dma_addr_t stats_pa;
        struct vnic_devcmd_fw_info *fw_info;
        dma_addr_t fw_info_pa;
        u64 args[VNIC_DEVCMD_NARGS];
        struct devcmd2_controller *devcmd2;

        int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                          int wait);
};

#define VNIC_MAX_RES_HDR_SIZE \
        (sizeof(struct vnic_resource_header) + \
        sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE 128

void *svnic_dev_priv(struct vnic_dev *vdev)
{
        return vdev->priv;
}

static int vnic_dev_discover_res(struct vnic_dev *vdev,
        struct vnic_dev_bar *bar, unsigned int num_bars)
{
        struct vnic_resource_header __iomem *rh;
        struct vnic_resource __iomem *r;
        u8 type;

        if (num_bars == 0)
                return -EINVAL;

        if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
                pr_err("vNIC BAR0 res hdr length error\n");

                return -EINVAL;
        }

        rh = bar->vaddr;
        if (!rh) {
                pr_err("vNIC BAR0 res hdr not mem-mapped\n");

                return -EINVAL;
        }

        if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
            ioread32(&rh->version) != VNIC_RES_VERSION) {
                pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n",
                        VNIC_RES_MAGIC, VNIC_RES_VERSION,
                        ioread32(&rh->magic), ioread32(&rh->version));

                return -EINVAL;
        }

        r = (struct vnic_resource __iomem *)(rh + 1);

        while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

                u8 bar_num = ioread8(&r->bar);
                u32 bar_offset = ioread32(&r->bar_offset);
                u32 count = ioread32(&r->count);
                u32 len;

                r++;

                if (bar_num >= num_bars)
                        continue;

                if (!bar[bar_num].len || !bar[bar_num].vaddr)
                        continue;

                switch (type) {
                case RES_TYPE_WQ:
                case RES_TYPE_RQ:
                case RES_TYPE_CQ:
                case RES_TYPE_INTR_CTRL:
                        /* each count is stride bytes long */
                        len = count * VNIC_RES_STRIDE;
                        if (len + bar_offset > bar->len) {
                                pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
                                        type, bar_offset,
                                        len,
                                        bar->len);

                                return -EINVAL;
                        }
                        break;

                case RES_TYPE_INTR_PBA_LEGACY:
                case RES_TYPE_DEVCMD:
                case RES_TYPE_DEVCMD2:
                        len = count;
                        break;

                default:
                        continue;
                }

                vdev->res[type].count = count;
                vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
        }

        return 0;
}

unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
        enum vnic_res_type type)
{
        return vdev->res[type].count;
}

void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
        unsigned int index)
{
        if (!vdev->res[type].vaddr)
                return NULL;

        switch (type) {
        case RES_TYPE_WQ:
        case RES_TYPE_RQ:
        case RES_TYPE_CQ:
        case RES_TYPE_INTR_CTRL:
                return (char __iomem *)vdev->res[type].vaddr +
                                        index * VNIC_RES_STRIDE;

        default:
                return (char __iomem *)vdev->res[type].vaddr;
        }
}
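
/*
 * Illustrative sketch only (not part of the driver): a per-queue resource
 * such as a WQ control block would typically be mapped as
 *
 *	struct vnic_wq_ctrl __iomem *ctrl =
 *		svnic_dev_get_res(vdev, RES_TYPE_WQ, index);
 *
 * where index is expected to stay below
 * svnic_dev_get_res_count(vdev, RES_TYPE_WQ); this function does not
 * bounds-check it.
 */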

unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
                                      unsigned int desc_count,
                                      unsigned int desc_size)
{
        /* The base address of the desc rings must be 512 byte aligned.
         * Descriptor count is aligned to groups of 32 descriptors.  A
         * count of 0 means the maximum 4096 descriptors.  Descriptor
         * size is aligned to 16 bytes.
         */

        unsigned int count_align = 32;
        unsigned int desc_align = 16;

        ring->base_align = 512;

        if (desc_count == 0)
                desc_count = 4096;

        ring->desc_count = ALIGN(desc_count, count_align);

        ring->desc_size = ALIGN(desc_size, desc_align);

        ring->size = ring->desc_count * ring->desc_size;
        ring->size_unaligned = ring->size + ring->base_align;

        return ring->size_unaligned;
}
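
/*
 * Worked example of the sizing rules above (illustrative only): asking for
 * 100 descriptors of 24 bytes rounds up to 128 descriptors of 32 bytes,
 * so ring->size = 4096 and ring->size_unaligned = 4096 + 512; the extra
 * 512 bytes leave room to realign the ring base in
 * svnic_dev_alloc_desc_ring() below.
 */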

void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
        memset(ring->descs, 0, ring->size);
}

int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
        unsigned int desc_count, unsigned int desc_size)
{
        svnic_dev_desc_ring_size(ring, desc_count, desc_size);

        ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
                ring->size_unaligned,
                &ring->base_addr_unaligned);

        if (!ring->descs_unaligned) {
                pr_err("Failed to allocate ring (size=%d), aborting\n",
                        (int)ring->size);

                return -ENOMEM;
        }

        ring->base_addr = ALIGN(ring->base_addr_unaligned,
                ring->base_align);
        ring->descs = (u8 *)ring->descs_unaligned +
                (ring->base_addr - ring->base_addr_unaligned);

        svnic_dev_clear_desc_ring(ring);

        ring->desc_avail = ring->desc_count - 1;

        return 0;
}

void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
        if (ring->descs) {
                pci_free_consistent(vdev->pdev,
                        ring->size_unaligned,
                        ring->descs_unaligned,
                        ring->base_addr_unaligned);
                ring->descs = NULL;
        }
}

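/*
 * Issue one firmware command over the devcmd2 work queue: copy the command
 * and its arguments into the slot at the current posted_index, publish it
 * by writing the next index back to posted_index, and (unless the command
 * is flagged DEVCMD2_FNORESULT) poll the results ring in 100 usec steps,
 * up to 'wait' iterations, until the entry's color bit matches the
 * controller's current color.  A posted_index of all-ones is taken to mean
 * the hardware has been surprise-removed.
 */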
static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
        int wait)
{
        struct devcmd2_controller *dc2c = vdev->devcmd2;
        struct devcmd2_result *result = dc2c->result + dc2c->next_result;
        unsigned int i;
        int delay;
        int err;
        u32 posted;
        u32 new_posted;

        posted = ioread32(&dc2c->wq_ctrl->posted_index);

        if (posted == 0xFFFFFFFF) { /* check for hardware gone */
                /* Hardware surprise removal: return error */
                return -ENODEV;
        }

        new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
        dc2c->cmd_ring[posted].cmd = cmd;
        dc2c->cmd_ring[posted].flags = 0;

        if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
                dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;

        if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
                for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
                        dc2c->cmd_ring[posted].args[i] = vdev->args[i];
        }
        /* The write memory barrier prevents the compiler and/or CPU from
         * reordering the stores above past the posted_index update, so the
         * descriptor is fully initialized before it is posted.  Otherwise,
         * hardware could read stale descriptor fields.
         */
        wmb();
        iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);

        if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
                return 0;

        for (delay = 0; delay < wait; delay++) {
                udelay(100);
                if (result->color == dc2c->color) {
                        dc2c->next_result++;
                        if (dc2c->next_result == dc2c->result_size) {
                                dc2c->next_result = 0;
                                dc2c->color = dc2c->color ? 0 : 1;
                        }
                        if (result->error) {
                                err = (int) result->error;
                                if (err != ERR_ECMDUNKNOWN ||
                                    cmd != CMD_CAPABILITY)
                                        pr_err("Error %d devcmd %d\n",
                                                err, _CMD_N(cmd));

                                return err;
                        }
                        if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
                                /*
                                 * The rmb() prevents the compiler and/or CPU
                                 * from reordering the result reads ahead of
                                 * the color check above, which could
                                 * otherwise return stale values.
                                 */
                                rmb();
                                for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
                                        vdev->args[i] = result->results[i];
                        }

                        return 0;
                }
        }

        pr_err("Timed out devcmd %d\n", _CMD_N(cmd));

        return -ETIMEDOUT;
}

static int svnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
        struct devcmd2_controller *dc2c = NULL;
        unsigned int fetch_idx;
        int ret;
        void __iomem *p;

        if (vdev->devcmd2)
                return 0;

        p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
        if (!p)
                return -ENODEV;

        dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC);
        if (!dc2c)
                return -ENOMEM;

        vdev->devcmd2 = dc2c;

        dc2c->color = 1;
        dc2c->result_size = DEVCMD2_RING_SIZE;

        ret = vnic_wq_devcmd2_alloc(vdev,
                                    &dc2c->wq,
                                    DEVCMD2_RING_SIZE,
                                    DEVCMD2_DESC_SIZE);
        if (ret)
                goto err_free_devcmd2;

        fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
        if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
                /* Hardware surprise removal: reset fetch_index */
                fetch_idx = 0;
        }

        /*
         * Never change fetch_index; when setting up the WQ for devcmd2,
         * set posted_index equal to fetch_index.
         */
        vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
        svnic_wq_enable(&dc2c->wq);
        ret = svnic_dev_alloc_desc_ring(vdev,
                                        &dc2c->results_ring,
                                        DEVCMD2_RING_SIZE,
                                        DEVCMD2_DESC_SIZE);
        if (ret)
                goto err_free_wq;

        dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs;
        dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
        dc2c->wq_ctrl = dc2c->wq.ctrl;
        vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET;
        vdev->args[1] = DEVCMD2_RING_SIZE;

        ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
        if (ret < 0)
                goto err_free_desc_ring;

        vdev->devcmd_rtn = &_svnic_dev_cmd2;
        pr_info("DEVCMD2 Initialized.\n");

        return ret;

err_free_desc_ring:
        svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);

err_free_wq:
        svnic_wq_disable(&dc2c->wq);
        svnic_wq_free(&dc2c->wq);

err_free_devcmd2:
        kfree(dc2c);
        vdev->devcmd2 = NULL;

        return ret;
} /* end of svnic_dev_init_devcmd2 */

static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
        struct devcmd2_controller *dc2c = vdev->devcmd2;

        vdev->devcmd2 = NULL;
        vdev->devcmd_rtn = NULL;

        svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
        svnic_wq_disable(&dc2c->wq);
        svnic_wq_free(&dc2c->wq);
        kfree(dc2c);
}

int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
        u64 *a0, u64 *a1, int wait)
{
        int err;

        memset(vdev->args, 0, sizeof(vdev->args));
        vdev->args[0] = *a0;
        vdev->args[1] = *a1;

        err = (*vdev->devcmd_rtn)(vdev, cmd, wait);

        *a0 = vdev->args[0];
        *a1 = vdev->args[1];

        return err;
}
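
/*
 * Illustrative sketch only: callers pass their two argument words by
 * reference and read back whatever the firmware returned, e.g.
 *
 *	u64 a0 = 0, a1 = 0;
 *	int err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1,
 *				VNIC_DVCMD_TMO);
 *
 * which is the pattern the wrapper helpers below follow.
 */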

int svnic_dev_fw_info(struct vnic_dev *vdev,
        struct vnic_devcmd_fw_info **fw_info)
{
        u64 a0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;
        int err = 0;

        if (!vdev->fw_info) {
                vdev->fw_info = pci_alloc_consistent(vdev->pdev,
                        sizeof(struct vnic_devcmd_fw_info),
                        &vdev->fw_info_pa);
                if (!vdev->fw_info)
                        return -ENOMEM;

                a0 = vdev->fw_info_pa;

                /* only get fw_info once and cache it */
                err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
        }

        *fw_info = vdev->fw_info;

        return err;
}

int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
        unsigned int size, void *value)
{
        u64 a0, a1;
        int wait = VNIC_DVCMD_TMO;
        int err;

        a0 = offset;
        a1 = size;

        err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

        switch (size) {
        case 1:
                *(u8 *)value = (u8)a0;
                break;
        case 2:
                *(u16 *)value = (u16)a0;
                break;
        case 4:
                *(u32 *)value = (u32)a0;
                break;
        case 8:
                *(u64 *)value = a0;
                break;
        default:
                BUG();
                break;
        }

        return err;
}

int svnic_dev_stats_clear(struct vnic_dev *vdev)
{
        u64 a0 = 0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;

        return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
        u64 a0, a1;
        int wait = VNIC_DVCMD_TMO;

        if (!vdev->stats) {
                vdev->stats = pci_alloc_consistent(vdev->pdev,
                        sizeof(struct vnic_stats), &vdev->stats_pa);
                if (!vdev->stats)
                        return -ENOMEM;
        }

        *stats = vdev->stats;
        a0 = vdev->stats_pa;
        a1 = sizeof(struct vnic_stats);

        return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int svnic_dev_close(struct vnic_dev *vdev)
{
        u64 a0 = 0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;

        return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int svnic_dev_enable_wait(struct vnic_dev *vdev)
{
        u64 a0 = 0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;
        int err = 0;

        err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
        if (err == ERR_ECMDUNKNOWN)
                return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);

        return err;
}

int svnic_dev_disable(struct vnic_dev *vdev)
{
        u64 a0 = 0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;

        return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int svnic_dev_open(struct vnic_dev *vdev, int arg)
{
        u64 a0 = (u32)arg, a1 = 0;
        int wait = VNIC_DVCMD_TMO;

        return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int svnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
        u64 a0 = 0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;
        int err;

        *done = 0;

        err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
        if (err)
                return err;

        *done = (a0 == 0);

        return 0;
}
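
/*
 * Illustrative sketch only: CMD_OPEN completes asynchronously, so a caller
 * would typically pair svnic_dev_open() with a poll loop along the lines of
 *
 *	int done = 0;
 *	int err = svnic_dev_open(vdev, 0);
 *
 *	while (!err && !done) {
 *		err = svnic_dev_open_done(vdev, &done);
 *		if (!err && !done)
 *			msleep(100);
 *	}
 *
 * with the delay and the overall timeout left to the caller's own policy.
 */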

int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
        u64 a0, a1;
        int wait = VNIC_DVCMD_TMO;

        if (!vdev->notify) {
                vdev->notify = pci_alloc_consistent(vdev->pdev,
                        sizeof(struct vnic_devcmd_notify),
                        &vdev->notify_pa);
                if (!vdev->notify)
                        return -ENOMEM;
        }

        a0 = vdev->notify_pa;
        a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK;
        a1 += sizeof(struct vnic_devcmd_notify);

        return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

void svnic_dev_notify_unset(struct vnic_dev *vdev)
{
        u64 a0, a1;
        int wait = VNIC_DVCMD_TMO;

        a0 = 0;  /* paddr = 0 to unset notify buffer */
        a1 = VNIC_NOTIFY_INTR_MASK; /* intr num = -1 to unreg for intr */
        a1 += sizeof(struct vnic_devcmd_notify);

        svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

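/*
 * Snapshot the firmware-updated notify area into notify_copy.  Word 0 of
 * the block is a checksum of the remaining words; the copy is retried
 * until the checksum matches, so a torn read taken while firmware is
 * updating the block is never handed back to callers.
 */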
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
        u32 *words;
        unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
        unsigned int i;
        u32 csum;

        if (!vdev->notify)
                return 0;

        do {
                csum = 0;
                memcpy(&vdev->notify_copy, vdev->notify,
                        sizeof(struct vnic_devcmd_notify));
                words = (u32 *)&vdev->notify_copy;
                for (i = 1; i < nwords; i++)
                        csum += words[i];
        } while (csum != words[0]);

        return 1;
}

int svnic_dev_init(struct vnic_dev *vdev, int arg)
{
        u64 a0 = (u32)arg, a1 = 0;
        int wait = VNIC_DVCMD_TMO;

        return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}

int svnic_dev_link_status(struct vnic_dev *vdev)
{
        if (vdev->linkstatus)
                return *vdev->linkstatus;

        if (!vnic_dev_notify_ready(vdev))
                return 0;

        return vdev->notify_copy.link_state;
}

u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
        if (!vnic_dev_notify_ready(vdev))
                return 0;

        return vdev->notify_copy.link_down_cnt;
}

void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
        enum vnic_dev_intr_mode intr_mode)
{
        vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
        return vdev->intr_mode;
}

void svnic_dev_unregister(struct vnic_dev *vdev)
{
        if (vdev) {
                if (vdev->notify)
                        pci_free_consistent(vdev->pdev,
                                sizeof(struct vnic_devcmd_notify),
                                vdev->notify,
                                vdev->notify_pa);
                if (vdev->linkstatus)
                        pci_free_consistent(vdev->pdev,
                                sizeof(u32),
                                vdev->linkstatus,
                                vdev->linkstatus_pa);
                if (vdev->stats)
                        pci_free_consistent(vdev->pdev,
                                sizeof(struct vnic_stats),
                                vdev->stats, vdev->stats_pa);
                if (vdev->fw_info)
                        pci_free_consistent(vdev->pdev,
                                sizeof(struct vnic_devcmd_fw_info),
                                vdev->fw_info, vdev->fw_info_pa);
                if (vdev->devcmd2)
                        vnic_dev_deinit_devcmd2(vdev);
                kfree(vdev);
        }
}

struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
                                          void *priv,
                                          struct pci_dev *pdev,
                                          struct vnic_dev_bar *bar,
                                          unsigned int num_bars)
{
        if (!vdev) {
                vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
                if (!vdev)
                        return NULL;
        }

        vdev->priv = priv;
        vdev->pdev = pdev;

        if (vnic_dev_discover_res(vdev, bar, num_bars))
                goto err_out;

        return vdev;

err_out:
        svnic_dev_unregister(vdev);

        return NULL;
} /* end of svnic_dev_alloc_discover */
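
/*
 * Illustrative sketch only: during probe a caller would typically do
 * something like
 *
 *	vdev = svnic_dev_alloc_discover(NULL, priv, pdev, bar, num_bars);
 *	if (!vdev)
 *		goto fail;
 *	if (svnic_dev_cmd_init(vdev, 0))
 *		goto fail_unregister;
 *
 * (labels here are hypothetical) before opening the device, and undo it
 * all with svnic_dev_unregister().
 */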

/*
 * The fallback argument is unused here; it is kept so this interface stays
 * common with the other vNIC drivers.
 */
int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback)
{
        int err = -ENODEV;
        void __iomem *p;

        p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
        if (p)
                err = svnic_dev_init_devcmd2(vdev);
        else
                pr_err("DEVCMD2 resource not found.\n");

        return err;
} /* end of svnic_dev_cmd_init */
