linux/drivers/scsi/vmw_pvscsi.c
/*
 * Linux driver for VMware's para-virtualized SCSI HBA.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#include "vmw_pvscsi.h"

#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"

MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
MODULE_AUTHOR("VMware, Inc.");
MODULE_LICENSE("GPL");
MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);

#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING       8
#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING       1
#define PVSCSI_DEFAULT_QUEUE_DEPTH              64
#define SGL_SIZE                                PAGE_SIZE

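/*
 * Sizing note (informational): pvscsi_allocate_rings() derives the request
 * queue depth as req_depth = req_pages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE,
 * and one pvscsi_ctx is allocated per request-ring entry, so every in-flight
 * command owns exactly one ring slot.
 */
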
struct pvscsi_sg_list {
        struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
};

struct pvscsi_ctx {
        /*
         * The index of the context in cmd_map serves as the context ID for a
         * 1-to-1 mapping of completions back to requests.
         */
        struct scsi_cmnd        *cmd;
        struct pvscsi_sg_list   *sgl;
        struct list_head        list;
        dma_addr_t              dataPA;
        dma_addr_t              sensePA;
        dma_addr_t              sglPA;
};

struct pvscsi_adapter {
        char                            *mmioBase;
        unsigned int                    irq;
        u8                              rev;
        bool                            use_msi;
        bool                            use_msix;
        bool                            use_msg;

        spinlock_t                      hw_lock;

        struct workqueue_struct         *workqueue;
        struct work_struct              work;

        struct PVSCSIRingReqDesc        *req_ring;
        unsigned                        req_pages;
        unsigned                        req_depth;
        dma_addr_t                      reqRingPA;

        struct PVSCSIRingCmpDesc        *cmp_ring;
        unsigned                        cmp_pages;
        dma_addr_t                      cmpRingPA;

        struct PVSCSIRingMsgDesc        *msg_ring;
        unsigned                        msg_pages;
        dma_addr_t                      msgRingPA;

        struct PVSCSIRingsState         *rings_state;
        dma_addr_t                      ringStatePA;

        struct pci_dev                  *dev;
        struct Scsi_Host                *host;

        struct list_head                cmd_pool;
        struct pvscsi_ctx               *cmd_map;
};


/* Command line parameters */
static int pvscsi_ring_pages     = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
static int pvscsi_cmd_per_lun    = PVSCSI_DEFAULT_QUEUE_DEPTH;
static bool pvscsi_disable_msi;
static bool pvscsi_disable_msix;
static bool pvscsi_use_msg       = true;

#define PVSCSI_RW (S_IRUSR | S_IWUSR)

module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
                 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");

module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
                 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");

module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
                 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");

module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");

static const struct pci_device_id pvscsi_pci_tbl[] = {
        { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);

static struct device *
pvscsi_dev(const struct pvscsi_adapter *adapter)
{
        return &(adapter->dev->dev);
}

static struct pvscsi_ctx *
pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
        struct pvscsi_ctx *ctx, *end;

        end = &adapter->cmd_map[adapter->req_depth];
        for (ctx = adapter->cmd_map; ctx < end; ctx++)
                if (ctx->cmd == cmd)
                        return ctx;

        return NULL;
}

static struct pvscsi_ctx *
pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
        struct pvscsi_ctx *ctx;

        if (list_empty(&adapter->cmd_pool))
                return NULL;

        ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
        ctx->cmd = cmd;
        list_del(&ctx->list);

        return ctx;
}

static void pvscsi_release_context(struct pvscsi_adapter *adapter,
                                   struct pvscsi_ctx *ctx)
{
        ctx->cmd = NULL;
        list_add(&ctx->list, &adapter->cmd_pool);
}

/*
 * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
 * non-zero integer. ctx always points to an entry in the cmd_map array,
 * hence the return value is always >= 1.
 */
static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
                              const struct pvscsi_ctx *ctx)
{
        return ctx - adapter->cmd_map + 1;
}

static struct pvscsi_ctx *
pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
{
        return &adapter->cmd_map[context - 1];
}
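
/*
 * Informational example: the two helpers above are exact inverses, so the
 * 64-bit context value carried in a ring descriptor round-trips back to the
 * same pvscsi_ctx:
 *
 *      u64 id = pvscsi_map_context(adapter, ctx);      // cmd_map index + 1
 *      BUG_ON(pvscsi_get_context(adapter, id) != ctx);
 *
 * The +1 bias keeps 0 free, so the device never sees a NULL-like context.
 */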

static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
                             u32 offset, u32 val)
{
        writel(val, adapter->mmioBase + offset);
}

static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
{
        return readl(adapter->mmioBase + offset);
}

static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
{
        return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
}

static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
                                     u32 val)
{
        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}

static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
{
        u32 intr_bits;

        intr_bits = PVSCSI_INTR_CMPL_MASK;
        if (adapter->use_msg)
                intr_bits |= PVSCSI_INTR_MSG_MASK;

        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
}

static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
{
        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}

static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
                                  u32 cmd, const void *desc, size_t len)
{
        const u32 *ptr = desc;
        size_t i;

        len /= sizeof(*ptr);
        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
        for (i = 0; i < len; i++)
                pvscsi_reg_write(adapter,
                                 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
}
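
/*
 * Informational note: device commands are issued by writing the command
 * opcode to the COMMAND register and then streaming the descriptor into the
 * COMMAND_DATA register one 32-bit word at a time, e.g. for a 2-word
 * descriptor the register traffic is:
 *
 *      COMMAND      <- cmd
 *      COMMAND_DATA <- word 0
 *      COMMAND_DATA <- word 1
 *
 * Descriptors are therefore expected to be a multiple of 4 bytes long.
 */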

static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
                             const struct pvscsi_ctx *ctx)
{
        struct PVSCSICmdDescAbortCmd cmd = { 0 };

        cmd.target = ctx->cmd->device->id;
        cmd.context = pvscsi_map_context(adapter, ctx);

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
}

static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
{
        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
}

static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
{
        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
}

static int scsi_is_rw(unsigned char op)
{
        return op == READ_6  || op == WRITE_6 ||
               op == READ_10 || op == WRITE_10 ||
               op == READ_12 || op == WRITE_12 ||
               op == READ_16 || op == WRITE_16;
}

static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
                           unsigned char op)
{
        if (scsi_is_rw(op))
                pvscsi_kick_rw_io(adapter);
        else
                pvscsi_process_request_ring(adapter);
}
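
/*
 * Informational note: the two doorbells differ in urgency. KICK_RW_IO only
 * tells the emulation that ordinary READ/WRITE requests were queued, which
 * (to our understanding of the device interface) it is free to coalesce;
 * KICK_NON_RW_IO asks it to process the request ring right away, which is
 * why the reset and shutdown paths call pvscsi_process_request_ring()
 * directly.
 */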

static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
{
        dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
}

static void ll_bus_reset(const struct pvscsi_adapter *adapter)
{
        dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
}

static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
{
        struct PVSCSICmdDescResetDevice cmd = { 0 };

        dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);

        cmd.target = target;

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
                              &cmd, sizeof(cmd));
}

static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
                             struct scatterlist *sg, unsigned count)
{
        unsigned i;
        struct PVSCSISGElement *sge;

        BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);

        sge = &ctx->sgl->sge[0];
        for (i = 0; i < count; i++, sg++) {
                sge[i].addr   = sg_dma_address(sg);
                sge[i].length = sg_dma_len(sg);
                sge[i].flags  = 0;
        }
}

/*
 * Map all data buffers for a command into PCI space and
 * set up the scatter/gather list if needed.
 */
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
                               struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
                               struct PVSCSIRingReqDesc *e)
{
        unsigned count;
        unsigned bufflen = scsi_bufflen(cmd);
        struct scatterlist *sg;

        e->dataLen = bufflen;
        e->dataAddr = 0;
        if (bufflen == 0)
                return;

        sg = scsi_sglist(cmd);
        count = scsi_sg_count(cmd);
        if (count != 0) {
                int segs = scsi_dma_map(cmd);
                if (segs > 1) {
                        pvscsi_create_sg(ctx, sg, segs);

                        e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
                        ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
                                                    SGL_SIZE, PCI_DMA_TODEVICE);
                        e->dataAddr = ctx->sglPA;
                } else
                        e->dataAddr = sg_dma_address(sg);
        } else {
                /*
                 * In case there is no S/G list, scsi_sglist points
                 * directly to the buffer.
                 */
                ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
                                             cmd->sc_data_direction);
                e->dataAddr = ctx->dataPA;
        }
}
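
/*
 * Informational summary of the cases above: a zero-length transfer leaves
 * dataAddr 0; a single DMA segment is passed inline in dataAddr; multiple
 * segments are described by the per-context SG page (ctx->sgl), whose own
 * bus address goes in dataAddr with PVSCSI_FLAG_CMD_WITH_SG_LIST set; and
 * when scsi_sg_count() is zero, the flat buffer is mapped directly with
 * pci_map_single().
 */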

static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
                                 struct pvscsi_ctx *ctx)
{
        struct scsi_cmnd *cmd;
        unsigned bufflen;

        cmd = ctx->cmd;
        bufflen = scsi_bufflen(cmd);

        if (bufflen != 0) {
                unsigned count = scsi_sg_count(cmd);

                if (count != 0) {
                        scsi_dma_unmap(cmd);
                        if (ctx->sglPA) {
                                pci_unmap_single(adapter->dev, ctx->sglPA,
                                                 SGL_SIZE, PCI_DMA_TODEVICE);
                                ctx->sglPA = 0;
                        }
                } else
                        pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
                                         cmd->sc_data_direction);
        }
        if (cmd->sense_buffer)
                pci_unmap_single(adapter->dev, ctx->sensePA,
                                 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
        adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
                                                    &adapter->ringStatePA);
        if (!adapter->rings_state)
                return -ENOMEM;

        adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
                                 pvscsi_ring_pages);
        adapter->req_depth = adapter->req_pages
                                        * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
        adapter->req_ring = pci_alloc_consistent(adapter->dev,
                                                 adapter->req_pages * PAGE_SIZE,
                                                 &adapter->reqRingPA);
        if (!adapter->req_ring)
                return -ENOMEM;

        adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
                                 pvscsi_ring_pages);
        adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
                                                 adapter->cmp_pages * PAGE_SIZE,
                                                 &adapter->cmpRingPA);
        if (!adapter->cmp_ring)
                return -ENOMEM;

        BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
        BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
        BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));

        if (!adapter->use_msg)
                return 0;

        adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
                                 pvscsi_msg_ring_pages);
        adapter->msg_ring = pci_alloc_consistent(adapter->dev,
                                                 adapter->msg_pages * PAGE_SIZE,
                                                 &adapter->msgRingPA);
        if (!adapter->msg_ring)
                return -ENOMEM;
        BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));

        return 0;
}
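
/*
 * Note: on a partial allocation failure above we return -ENOMEM without
 * unwinding; the probe error path frees whatever was allocated via
 * pvscsi_release_resources(), which checks each pointer individually.
 */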

static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
{
        struct PVSCSICmdDescSetupRings cmd = { 0 };
        dma_addr_t base;
        unsigned i;

        cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
        cmd.reqRingNumPages = adapter->req_pages;
        cmd.cmpRingNumPages = adapter->cmp_pages;

        base = adapter->reqRingPA;
        for (i = 0; i < adapter->req_pages; i++) {
                cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
                base += PAGE_SIZE;
        }

        base = adapter->cmpRingPA;
        for (i = 0; i < adapter->cmp_pages; i++) {
                cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
                base += PAGE_SIZE;
        }

        memset(adapter->rings_state, 0, PAGE_SIZE);
        memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
        memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
                              &cmd, sizeof(cmd));

        if (adapter->use_msg) {
                struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };

                cmd_msg.numPages = adapter->msg_pages;

                base = adapter->msgRingPA;
                for (i = 0; i < adapter->msg_pages; i++) {
                        cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
                        base += PAGE_SIZE;
                }
                memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);

                pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
                                      &cmd_msg, sizeof(cmd_msg));
        }
}
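
/*
 * Informational example: the SETUP_RINGS descriptor carries page numbers
 * rather than addresses, so each ring page's bus address is converted with
 * PPN = PA >> PAGE_SHIFT (e.g. with 4 KiB pages, PA 0x12345000 becomes PPN
 * 0x12345); the BUG_ON()s in pvscsi_allocate_rings() guarantee the shift
 * loses no low-order bits.
 */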

/*
 * Pull a completion descriptor off and pass the completion back
 * to the SCSI mid layer.
 */
static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
                                    const struct PVSCSIRingCmpDesc *e)
{
        struct pvscsi_ctx *ctx;
        struct scsi_cmnd *cmd;
        u32 btstat = e->hostStatus;
        u32 sdstat = e->scsiStatus;

        ctx = pvscsi_get_context(adapter, e->context);
        cmd = ctx->cmd;
        pvscsi_unmap_buffers(adapter, ctx);
        pvscsi_release_context(adapter, ctx);
        cmd->result = 0;

        if (sdstat != SAM_STAT_GOOD &&
            (btstat == BTSTAT_SUCCESS ||
             btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
             btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
                cmd->result = (DID_OK << 16) | sdstat;
                if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
                        cmd->result |= (DRIVER_SENSE << 24);
        } else
                switch (btstat) {
                case BTSTAT_SUCCESS:
                case BTSTAT_LINKED_COMMAND_COMPLETED:
                case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
                        /* If everything went fine, let's move on..  */
                        cmd->result = (DID_OK << 16);
                        break;

                case BTSTAT_DATARUN:
                case BTSTAT_DATA_UNDERRUN:
                        /* Report residual data in underruns */
                        scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
                        cmd->result = (DID_ERROR << 16);
                        break;

                case BTSTAT_SELTIMEO:
                        /* Our emulation returns this for non-connected devs */
                        cmd->result = (DID_BAD_TARGET << 16);
                        break;

                case BTSTAT_LUNMISMATCH:
                case BTSTAT_TAGREJECT:
                case BTSTAT_BADMSG:
                        cmd->result = (DRIVER_INVALID << 24);
                        /* fall through */

                case BTSTAT_HAHARDWARE:
                case BTSTAT_INVPHASE:
                case BTSTAT_HATIMEOUT:
                case BTSTAT_NORESPONSE:
                case BTSTAT_DISCONNECT:
                case BTSTAT_HASOFTWARE:
                case BTSTAT_BUSFREE:
                case BTSTAT_SENSFAILED:
                        cmd->result |= (DID_ERROR << 16);
                        break;

                case BTSTAT_SENTRST:
                case BTSTAT_RECVRST:
                case BTSTAT_BUSRESET:
                        cmd->result = (DID_RESET << 16);
                        break;

                case BTSTAT_ABORTQUEUE:
                        cmd->result = (DID_ABORT << 16);
                        break;

                case BTSTAT_SCSIPARITY:
                        cmd->result = (DID_PARITY << 16);
                        break;

                default:
                        cmd->result = (DID_ERROR << 16);
                        scmd_printk(KERN_DEBUG, cmd,
                                    "Unknown completion status: 0x%x\n",
                                    btstat);
        }

        dev_dbg(&cmd->device->sdev_gendev,
                "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
                cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);

        cmd->scsi_done(cmd);
}
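
/*
 * Informational note: cmd->result above uses the classic SCSI result layout
 * of this kernel generation: driver byte << 24, host byte << 16 and the SCSI
 * status byte in the low bits, e.g. (DID_OK << 16) | sdstat reports a
 * completed command with the target's SCSI status attached.
 */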

/*
 * Barrier usage: since the PVSCSI device is emulated, there may be cases
 * where we want to serialize some accesses between the driver and the
 * emulation layer. We use compiler barriers instead of the more expensive
 * memory barriers because PVSCSI is only supported on x86, which has strong
 * memory access ordering.
 */
static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
{
        struct PVSCSIRingsState *s = adapter->rings_state;
        struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
        u32 cmp_entries = s->cmpNumEntriesLog2;

        while (s->cmpConsIdx != s->cmpProdIdx) {
                struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
                                                      MASK(cmp_entries));
                /*
                 * This barrier() ensures that *e is not dereferenced while
                 * the device emulation still writes data into the slot.
                 * Since the device emulation advances s->cmpProdIdx only
                 * after updating the slot, we want to check it first.
                 */
                barrier();
                pvscsi_complete_request(adapter, e);
                /*
                 * This barrier() ensures that the compiler doesn't reorder
                 * the write to s->cmpConsIdx before the read of (*e) inside
                 * pvscsi_complete_request. Otherwise, the device emulation
                 * may overwrite *e before we had a chance to read it.
                 */
                barrier();
                s->cmpConsIdx++;
        }
}
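
/*
 * Informational example: both ring indices are free-running u32 counters and
 * MASK() (from vmw_pvscsi.h) reduces them modulo the ring size. With, say,
 * cmpNumEntriesLog2 == 5 (a 32-entry ring), cmpConsIdx == 70 selects slot
 * 70 & 31 == 6; the indices themselves are only ever compared and
 * incremented, never wrapped by hand.
 */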

/*
 * Translate a Linux SCSI request into a request ring entry.
 */
static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
                             struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
{
        struct PVSCSIRingsState *s;
        struct PVSCSIRingReqDesc *e;
        struct scsi_device *sdev;
        u32 req_entries;

        s = adapter->rings_state;
        sdev = cmd->device;
        req_entries = s->reqNumEntriesLog2;

        /*
         * If this condition holds, we might have room on the request ring, but
         * we might not have room on the completion ring for the response.
         * However, we have already ruled out this possibility - we would not
         * have successfully allocated a context if it were true, since we only
         * have one context per request entry.  Check for it anyway, since it
         * would be a serious bug.
         */
        if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
                scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
                            "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
                            s->reqProdIdx, s->cmpConsIdx);
                return -1;
        }

        e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));

        e->bus    = sdev->channel;
        e->target = sdev->id;
        memset(e->lun, 0, sizeof(e->lun));
        e->lun[1] = sdev->lun;

        if (cmd->sense_buffer) {
                ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
                                              SCSI_SENSE_BUFFERSIZE,
                                              PCI_DMA_FROMDEVICE);
                e->senseAddr = ctx->sensePA;
                e->senseLen = SCSI_SENSE_BUFFERSIZE;
        } else {
                e->senseLen  = 0;
                e->senseAddr = 0;
        }
        e->cdbLen   = cmd->cmd_len;
        e->vcpuHint = smp_processor_id();
        memcpy(e->cdb, cmd->cmnd, e->cdbLen);

        e->tag = SIMPLE_QUEUE_TAG;
        if (sdev->tagged_supported &&
            (cmd->tag == HEAD_OF_QUEUE_TAG ||
             cmd->tag == ORDERED_QUEUE_TAG))
                e->tag = cmd->tag;

        if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
        else if (cmd->sc_data_direction == DMA_TO_DEVICE)
                e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
        else if (cmd->sc_data_direction == DMA_NONE)
                e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
        else
                e->flags = 0;

        pvscsi_map_buffers(adapter, ctx, cmd, e);

        e->context = pvscsi_map_context(adapter, ctx);

        barrier();

        s->reqProdIdx++;

        return 0;
}
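
/*
 * Informational note: the ring-full check above relies on unsigned
 * wraparound: reqProdIdx and cmpConsIdx increase without bound, so their
 * difference is the number of requests the device has not yet completed,
 * and the check stays correct even after either counter wraps past 2^32.
 */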

static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
        struct Scsi_Host *host = cmd->device->host;
        struct pvscsi_adapter *adapter = shost_priv(host);
        struct pvscsi_ctx *ctx;
        unsigned long flags;

        spin_lock_irqsave(&adapter->hw_lock, flags);

        ctx = pvscsi_acquire_context(adapter, cmd);
        if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
                if (ctx)
                        pvscsi_release_context(adapter, ctx);
                spin_unlock_irqrestore(&adapter->hw_lock, flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        cmd->scsi_done = done;

        dev_dbg(&cmd->device->sdev_gendev,
                "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);

        spin_unlock_irqrestore(&adapter->hw_lock, flags);

        pvscsi_kick_io(adapter, cmd->cmnd[0]);

        return 0;
}

static DEF_SCSI_QCMD(pvscsi_queue)
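
/*
 * DEF_SCSI_QCMD() generates the pvscsi_queue() wrapper that grabs the SCSI
 * host lock around pvscsi_queue_lck(), matching the locked queuecommand
 * convention of this kernel's SCSI midlayer.
 */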

static int pvscsi_abort(struct scsi_cmnd *cmd)
{
        struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
        struct pvscsi_ctx *ctx;
        unsigned long flags;

        scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
                    adapter->host->host_no, cmd);

        spin_lock_irqsave(&adapter->hw_lock, flags);

        /*
         * Poll the completion ring first - we might be trying to abort
         * a command whose completion is already waiting on the completion
         * ring.
         */
        pvscsi_process_completion_ring(adapter);

        /*
         * If there is no context for the command, it either already
         * succeeded or else was never properly issued.  Not our problem.
         */
        ctx = pvscsi_find_context(adapter, cmd);
        if (!ctx) {
                scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
                goto out;
        }

        pvscsi_abort_cmd(adapter, ctx);

        pvscsi_process_completion_ring(adapter);

out:
        spin_unlock_irqrestore(&adapter->hw_lock, flags);
        return SUCCESS;
}

/*
 * Abort all outstanding requests.  This is only safe to use if the completion
 * ring will never be walked again or the device has been reset, because it
 * destroys the 1-1 mapping between the context field passed to the emulation
 * and our request structure.
 */
static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
{
        unsigned i;

        for (i = 0; i < adapter->req_depth; i++) {
                struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
                struct scsi_cmnd *cmd = ctx->cmd;
                if (cmd) {
                        scmd_printk(KERN_ERR, cmd,
                                    "Forced reset on cmd %p\n", cmd);
                        pvscsi_unmap_buffers(adapter, ctx);
                        pvscsi_release_context(adapter, ctx);
                        cmd->result = (DID_RESET << 16);
                        cmd->scsi_done(cmd);
                }
        }
}

static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct pvscsi_adapter *adapter = shost_priv(host);
        unsigned long flags;
        bool use_msg;

        scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");

        spin_lock_irqsave(&adapter->hw_lock, flags);

        use_msg = adapter->use_msg;

        if (use_msg) {
                adapter->use_msg = 0;
                spin_unlock_irqrestore(&adapter->hw_lock, flags);

                /*
                 * Now that we know that the ISR won't add more work on the
                 * workqueue we can safely flush any outstanding work.
                 */
                flush_workqueue(adapter->workqueue);
                spin_lock_irqsave(&adapter->hw_lock, flags);
        }

        /*
         * We're going to tear down the entire ring structure and set it back
         * up, so stall new requests until all completions are flushed and
         * the rings are back in place.
         */

        pvscsi_process_request_ring(adapter);

        ll_adapter_reset(adapter);

        /*
         * Now process any completions.  Note we do this AFTER adapter reset,
         * which is strange, but stops races where completions get posted
         * between processing the ring and issuing the reset.  The backend will
         * not touch the ring memory after reset, so the immediately pre-reset
         * completion ring state is still valid.
         */
        pvscsi_process_completion_ring(adapter);

        pvscsi_reset_all(adapter);
        adapter->use_msg = use_msg;
        pvscsi_setup_all_rings(adapter);
        pvscsi_unmask_intr(adapter);

        spin_unlock_irqrestore(&adapter->hw_lock, flags);

        return SUCCESS;
}

static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct pvscsi_adapter *adapter = shost_priv(host);
        unsigned long flags;

        scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");

        /*
         * We don't want to queue new requests for this bus after
         * flushing all pending requests to emulation, since new
         * requests could then sneak in during this bus reset phase,
         * so take the lock now.
         */
        spin_lock_irqsave(&adapter->hw_lock, flags);

        pvscsi_process_request_ring(adapter);
        ll_bus_reset(adapter);
        pvscsi_process_completion_ring(adapter);

        spin_unlock_irqrestore(&adapter->hw_lock, flags);

        return SUCCESS;
}

static int pvscsi_device_reset(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct pvscsi_adapter *adapter = shost_priv(host);
        unsigned long flags;

        scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
                    host->host_no, cmd->device->id);

        /*
         * We don't want to queue new requests for this device after flushing
         * all pending requests to emulation, since new requests could then
         * sneak in during this device reset phase, so take the lock now.
         */
        spin_lock_irqsave(&adapter->hw_lock, flags);

        pvscsi_process_request_ring(adapter);
        ll_device_reset(adapter, cmd->device->id);
        pvscsi_process_completion_ring(adapter);

        spin_unlock_irqrestore(&adapter->hw_lock, flags);

        return SUCCESS;
}

static struct scsi_host_template pvscsi_template;

static const char *pvscsi_info(struct Scsi_Host *host)
{
        struct pvscsi_adapter *adapter = shost_priv(host);
        static char buf[256];

        sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
                "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
                adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
                pvscsi_template.cmd_per_lun);

        return buf;
}

static struct scsi_host_template pvscsi_template = {
        .module                         = THIS_MODULE,
        .name                           = "VMware PVSCSI Host Adapter",
        .proc_name                      = "vmw_pvscsi",
        .info                           = pvscsi_info,
        .queuecommand                   = pvscsi_queue,
        .this_id                        = -1,
        .sg_tablesize                   = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
        .dma_boundary                   = UINT_MAX,
        .max_sectors                    = 0xffff,
        .use_clustering                 = ENABLE_CLUSTERING,
        .eh_abort_handler               = pvscsi_abort,
        .eh_device_reset_handler        = pvscsi_device_reset,
        .eh_bus_reset_handler           = pvscsi_bus_reset,
        .eh_host_reset_handler          = pvscsi_host_reset,
};
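
/*
 * Note: can_queue and cmd_per_lun are left unset here on purpose; they are
 * computed from the module parameters in pvscsi_probe() before the template
 * is handed to scsi_host_alloc().
 */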

static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
                               const struct PVSCSIRingMsgDesc *e)
{
        struct PVSCSIRingsState *s = adapter->rings_state;
        struct Scsi_Host *host = adapter->host;
        struct scsi_device *sdev;

        printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
               e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);

        BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);

        if (e->type == PVSCSI_MSG_DEV_ADDED) {
                struct PVSCSIMsgDescDevStatusChanged *desc;
                desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

                printk(KERN_INFO
                       "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
                       desc->bus, desc->target, desc->lun[1]);

                if (!scsi_host_get(host))
                        return;

                sdev = scsi_device_lookup(host, desc->bus, desc->target,
                                          desc->lun[1]);
                if (sdev) {
                        printk(KERN_INFO "vmw_pvscsi: device already exists\n");
                        scsi_device_put(sdev);
                } else
                        scsi_add_device(adapter->host, desc->bus,
                                        desc->target, desc->lun[1]);

                scsi_host_put(host);
        } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
                struct PVSCSIMsgDescDevStatusChanged *desc;
                desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

                printk(KERN_INFO
                       "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
                       desc->bus, desc->target, desc->lun[1]);

                if (!scsi_host_get(host))
                        return;

                sdev = scsi_device_lookup(host, desc->bus, desc->target,
                                          desc->lun[1]);
                if (sdev) {
                        scsi_remove_device(sdev);
                        scsi_device_put(sdev);
                } else
                        printk(KERN_INFO
                               "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
                               desc->bus, desc->target, desc->lun[1]);

                scsi_host_put(host);
        }
}

static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
{
        struct PVSCSIRingsState *s = adapter->rings_state;

        return s->msgProdIdx != s->msgConsIdx;
}

static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
{
        struct PVSCSIRingsState *s = adapter->rings_state;
        struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
        u32 msg_entries = s->msgNumEntriesLog2;

        while (pvscsi_msg_pending(adapter)) {
                struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
                                                      MASK(msg_entries));

                barrier();
                pvscsi_process_msg(adapter, e);
                barrier();
                s->msgConsIdx++;
        }
}

static void pvscsi_msg_workqueue_handler(struct work_struct *data)
{
        struct pvscsi_adapter *adapter;

        adapter = container_of(data, struct pvscsi_adapter, work);

        pvscsi_process_msg_ring(adapter);
}

static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
{
        char name[32];

        if (!pvscsi_use_msg)
                return 0;

        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
                         PVSCSI_CMD_SETUP_MSG_RING);

        if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
                return 0;

        snprintf(name, sizeof(name),
                 "vmw_pvscsi_wq_%u", adapter->host->host_no);

        adapter->workqueue = create_singlethread_workqueue(name);
        if (!adapter->workqueue) {
                printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
                return 0;
        }
        INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);

        return 1;
}
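
/*
 * Informational note: the SETUP_MSG_RING probe above exploits the command
 * status register: issuing the command with no payload and reading back -1
 * from COMMAND_STATUS indicates (to our understanding) that the emulation
 * does not recognize it, in which case we silently run without a msg ring.
 */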

static irqreturn_t pvscsi_isr(int irq, void *devp)
{
        struct pvscsi_adapter *adapter = devp;
        int handled;

        if (adapter->use_msi || adapter->use_msix)
                handled = true;
        else {
                u32 val = pvscsi_read_intr_status(adapter);
                handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
                if (handled)
                        pvscsi_write_intr_status(devp, val);
        }

        if (handled) {
                unsigned long flags;

                spin_lock_irqsave(&adapter->hw_lock, flags);

                pvscsi_process_completion_ring(adapter);
                if (adapter->use_msg && pvscsi_msg_pending(adapter))
                        queue_work(adapter->workqueue, &adapter->work);

                spin_unlock_irqrestore(&adapter->hw_lock, flags);
        }

        return IRQ_RETVAL(handled);
}
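
/*
 * Note: with an exclusive MSI/MSI-X vector the interrupt is always ours, so
 * the status register is not consulted; for (possibly shared) INTx we claim
 * the interrupt only if one of the supported bits is set, and ack it by
 * writing the same bits back to INTR_STATUS.
 */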

static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
{
        struct pvscsi_ctx *ctx = adapter->cmd_map;
        unsigned i;

        for (i = 0; i < adapter->req_depth; ++i, ++ctx)
                free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
}

static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
                             unsigned int *irq)
{
        struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
        int ret;

        ret = pci_enable_msix(adapter->dev, &entry, 1);
        if (ret)
                return ret;

        *irq = entry.vector;

        return 0;
}
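
/*
 * Informational note: only a single MSI-X vector (PVSCSI_VECTOR_COMPLETION)
 * is requested here; with the pci_enable_msix() of this kernel generation, a
 * non-zero return (an error or the count of available vectors) simply makes
 * us fall back to MSI or INTx in pvscsi_probe().
 */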

static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
{
        if (adapter->irq) {
                free_irq(adapter->irq, adapter);
                adapter->irq = 0;
        }
        if (adapter->use_msi) {
                pci_disable_msi(adapter->dev);
                adapter->use_msi = 0;
        } else if (adapter->use_msix) {
                pci_disable_msix(adapter->dev);
                adapter->use_msix = 0;
        }
}

static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
        pvscsi_shutdown_intr(adapter);

        if (adapter->workqueue)
                destroy_workqueue(adapter->workqueue);

        if (adapter->mmioBase)
                pci_iounmap(adapter->dev, adapter->mmioBase);

        pci_release_regions(adapter->dev);

        if (adapter->cmd_map) {
                pvscsi_free_sgls(adapter);
                kfree(adapter->cmd_map);
        }

        if (adapter->rings_state)
                pci_free_consistent(adapter->dev, PAGE_SIZE,
                                    adapter->rings_state, adapter->ringStatePA);

        if (adapter->req_ring)
                pci_free_consistent(adapter->dev,
                                    adapter->req_pages * PAGE_SIZE,
                                    adapter->req_ring, adapter->reqRingPA);

        if (adapter->cmp_ring)
                pci_free_consistent(adapter->dev,
                                    adapter->cmp_pages * PAGE_SIZE,
                                    adapter->cmp_ring, adapter->cmpRingPA);

        if (adapter->msg_ring)
                pci_free_consistent(adapter->dev,
                                    adapter->msg_pages * PAGE_SIZE,
                                    adapter->msg_ring, adapter->msgRingPA);
}

/*
 * Allocate scatter gather lists.
 *
 * These are statically allocated.  Trying to be clever was not worth it.
 *
 * Dynamic allocation can fail, and we can't go deep into the memory
 * allocator, since we're a SCSI driver, and trying too hard to allocate
 * memory might generate disk I/O.  We also don't want to fail disk I/O
 * in that case because we can't get an allocation - the I/O could be
 * trying to swap out data to free memory.  Since that is pathological,
 * just use a statically allocated scatter list.
 */
static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
{
        struct pvscsi_ctx *ctx;
        int i;

        ctx = adapter->cmd_map;
        BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);

        for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
                ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
                                                    get_order(SGL_SIZE));
                ctx->sglPA = 0;
                BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
                if (!ctx->sgl) {
                        for (; i >= 0; --i, --ctx) {
                                free_pages((unsigned long)ctx->sgl,
                                           get_order(SGL_SIZE));
                                ctx->sgl = NULL;
                        }
                        return -ENOMEM;
                }
        }

        return 0;
}
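
/*
 * Note on the unwind above: when an allocation fails, the failing slot's
 * sgl is NULL and is included in the backwards loop; that is harmless
 * because free_pages() ignores a zero address.
 */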

/*
 * Query the device, fetch the config info and return the
 * maximum number of targets on the adapter. On any failure,
 * return the default of 16.
 */
static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
{
        struct PVSCSICmdDescConfigCmd cmd;
        struct PVSCSIConfigPageHeader *header;
        struct device *dev;
        dma_addr_t configPagePA;
        void *config_page;
        u32 numPhys = 16;

        dev = pvscsi_dev(adapter);
        config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
                                           &configPagePA);
        if (!config_page) {
                dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
                goto exit;
        }
        BUG_ON(configPagePA & ~PAGE_MASK);

        /* Fetch config info from the device. */
        cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
        cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
        cmd.cmpAddr = configPagePA;
        cmd._pad = 0;

        /*
         * Mark the completion page header with error values. If the device
         * completes the command successfully, it sets the status values to
         * indicate success.
         */
        header = config_page;
        memset(header, 0, sizeof(*header));
        header->hostStatus = BTSTAT_INVPARAM;
        header->scsiStatus = SDSTAT_CHECK;

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof(cmd));

        if (header->hostStatus == BTSTAT_SUCCESS &&
            header->scsiStatus == SDSTAT_GOOD) {
                struct PVSCSIConfigPageController *config;

                config = config_page;
                numPhys = config->numPhys;
        } else
                dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
                         header->hostStatus, header->scsiStatus);
        pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
exit:
        return numPhys;
}

static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct pvscsi_adapter *adapter;
        struct Scsi_Host *host;
        struct device *dev;
        unsigned int i;
        unsigned long flags = 0;
        int error;

        error = -ENODEV;

        if (pci_enable_device(pdev))
                return error;

        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
            pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
                printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
        } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
                   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
                printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
        } else {
                printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
                goto out_disable_device;
        }

        pvscsi_template.can_queue =
                min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
                PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
        pvscsi_template.cmd_per_lun =
                min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
        host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
        if (!host) {
                printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
                goto out_disable_device;
        }

        adapter = shost_priv(host);
        memset(adapter, 0, sizeof(*adapter));
        adapter->dev  = pdev;
        adapter->host = host;

        spin_lock_init(&adapter->hw_lock);

        host->max_channel = 0;
        host->max_id      = 16;
        host->max_lun     = 1;
        host->max_cmd_len = 16;

        adapter->rev = pdev->revision;

        if (pci_request_regions(pdev, "vmw_pvscsi")) {
                printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
                goto out_free_host;
        }

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
                        continue;

                if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
                        continue;

                break;
        }

        if (i == DEVICE_COUNT_RESOURCE) {
                printk(KERN_ERR
                       "vmw_pvscsi: adapter has no suitable MMIO region\n");
                goto out_release_resources;
        }

        adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

        if (!adapter->mmioBase) {
                printk(KERN_ERR
                       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
                       i, PVSCSI_MEM_SPACE_SIZE);
                goto out_release_resources;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, host);

        ll_adapter_reset(adapter);

        adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

        error = pvscsi_allocate_rings(adapter);
        if (error) {
                printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
                goto out_release_resources;
        }

        /*
         * Ask the device for max number of targets.
         */
        host->max_id = pvscsi_get_max_targets(adapter);
        dev = pvscsi_dev(adapter);
        dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);

        /*
         * From this point on we should reset the adapter if anything goes
         * wrong.
         */
        pvscsi_setup_all_rings(adapter);

        adapter->cmd_map = kcalloc(adapter->req_depth,
                                   sizeof(struct pvscsi_ctx), GFP_KERNEL);
        if (!adapter->cmd_map) {
                printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
                error = -ENOMEM;
                goto out_reset_adapter;
        }

        INIT_LIST_HEAD(&adapter->cmd_pool);
        for (i = 0; i < adapter->req_depth; i++) {
                struct pvscsi_ctx *ctx = adapter->cmd_map + i;
                list_add(&ctx->list, &adapter->cmd_pool);
        }

        error = pvscsi_allocate_sg(adapter);
        if (error) {
                printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
                goto out_reset_adapter;
        }

        if (!pvscsi_disable_msix &&
            pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
                printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
                adapter->use_msix = 1;
        } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
                printk(KERN_INFO "vmw_pvscsi: using MSI\n");
                adapter->use_msi = 1;
                adapter->irq = pdev->irq;
        } else {
                printk(KERN_INFO "vmw_pvscsi: using INTx\n");
                adapter->irq = pdev->irq;
                flags = IRQF_SHARED;
        }

        error = request_irq(adapter->irq, pvscsi_isr, flags,
                            "vmw_pvscsi", adapter);
        if (error) {
                printk(KERN_ERR
                       "vmw_pvscsi: unable to request IRQ: %d\n", error);
                adapter->irq = 0;
                goto out_reset_adapter;
        }

        error = scsi_add_host(host, &pdev->dev);
        if (error) {
                printk(KERN_ERR
                       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
                goto out_reset_adapter;
        }

        dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
                 adapter->rev, host->host_no);

        pvscsi_unmask_intr(adapter);

        scsi_scan_host(host);

        return 0;

out_reset_adapter:
        ll_adapter_reset(adapter);
out_release_resources:
        pvscsi_release_resources(adapter);
out_free_host:
        scsi_host_put(host);
out_disable_device:
        pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);

        return error;
}

static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
{
        pvscsi_mask_intr(adapter);

        if (adapter->workqueue)
                flush_workqueue(adapter->workqueue);

        pvscsi_shutdown_intr(adapter);

        pvscsi_process_request_ring(adapter);
        pvscsi_process_completion_ring(adapter);
        ll_adapter_reset(adapter);
}

static void pvscsi_shutdown(struct pci_dev *dev)
{
        struct Scsi_Host *host = pci_get_drvdata(dev);
        struct pvscsi_adapter *adapter = shost_priv(host);

        __pvscsi_shutdown(adapter);
}

static void pvscsi_remove(struct pci_dev *pdev)
{
        struct Scsi_Host *host = pci_get_drvdata(pdev);
        struct pvscsi_adapter *adapter = shost_priv(host);

        scsi_remove_host(host);

        __pvscsi_shutdown(adapter);
        pvscsi_release_resources(adapter);

        scsi_host_put(host);

        pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
}

static struct pci_driver pvscsi_pci_driver = {
        .name           = "vmw_pvscsi",
        .id_table       = pvscsi_pci_tbl,
        .probe          = pvscsi_probe,
        .remove         = pvscsi_remove,
        .shutdown       = pvscsi_shutdown,
};

static int __init pvscsi_init(void)
{
        pr_info("%s - version %s\n",
                PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
        return pci_register_driver(&pvscsi_pci_driver);
}

static void __exit pvscsi_exit(void)
{
        pci_unregister_driver(&pvscsi_pci_driver);
}

module_init(pvscsi_init);
module_exit(pvscsi_exit);