qemu/hw/scsi/vmw_pvscsi.c
/*
 * QEMU VMWARE PVSCSI paravirtual SCSI bus
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Based on implementation by Paolo Bonzini
 * http://lists.gnu.org/archive/html/qemu-devel/2011-08/msg00729.html
 *
 * Authors:
 * Paolo Bonzini <pbonzini@redhat.com>
 * Dmitry Fleytman <dmitry@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 *
 * NOTE about MSI-X:
 * MSI-X support has been removed for the moment because it leads the Windows
 * OS to crash on startup. The crash happens because the Windows driver
 * requires the MSI-X shared memory to be part of the same BAR used for the
 * rings state registers, etc. This is not supported by the QEMU
 * infrastructure, so a separate BAR is created for MSI-X purposes. The
 * Windows driver fails to deal with 2 BARs.
 *
 */
  27
  28#include "qemu/osdep.h"
  29#include "qapi/error.h"
  30#include "qemu/main-loop.h"
  31#include "qemu/module.h"
  32#include "hw/scsi/scsi.h"
  33#include "migration/vmstate.h"
  34#include "scsi/constants.h"
  35#include "hw/pci/msi.h"
  36#include "hw/qdev-properties.h"
  37#include "vmw_pvscsi.h"
  38#include "trace.h"
  39#include "qom/object.h"
  40
  41
  42#define PVSCSI_USE_64BIT         (true)
  43#define PVSCSI_PER_VECTOR_MASK   (false)
  44
  45#define PVSCSI_MAX_DEVS                   (64)
  46#define PVSCSI_MSIX_NUM_VECTORS           (1)
  47
  48#define PVSCSI_MAX_SG_ELEM                2048
  49
  50#define PVSCSI_MAX_CMD_DATA_WORDS \
  51    (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))
  52
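/*
 * Helpers for accessing 32-bit fields of the PVSCSIRingsState page shared
 * with the guest: reads and writes go through PCI DMA on the owning device,
 * at rs_pa plus the field's offset within struct PVSCSIRingsState.
 */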
#define RS_GET_FIELD(m, field) \
    (ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
                 (m)->rs_pa + offsetof(struct PVSCSIRingsState, field)))
#define RS_SET_FIELD(m, field, val) \
    (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
                 (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val))

struct PVSCSIClass {
    PCIDeviceClass parent_class;
    DeviceRealize parent_dc_realize;
};

#define TYPE_PVSCSI "pvscsi"
OBJECT_DECLARE_TYPE(PVSCSIState, PVSCSIClass, PVSCSI)


/* Compatibility flags for migration */
#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT 0
#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION \
    (1 << PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT)
#define PVSCSI_COMPAT_DISABLE_PCIE_BIT 1
#define PVSCSI_COMPAT_DISABLE_PCIE \
    (1 << PVSCSI_COMPAT_DISABLE_PCIE_BIT)

#define PVSCSI_USE_OLD_PCI_CONFIGURATION(s) \
    ((s)->compat_flags & PVSCSI_COMPAT_OLD_PCI_CONFIGURATION)
#define PVSCSI_MSI_OFFSET(s) \
    (PVSCSI_USE_OLD_PCI_CONFIGURATION(s) ? 0x50 : 0x7c)
#define PVSCSI_EXP_EP_OFFSET (0x40)

typedef struct PVSCSIRingInfo {
    uint64_t            rs_pa;
    uint32_t            txr_len_mask;
    uint32_t            rxr_len_mask;
    uint32_t            msg_len_mask;
    uint64_t            req_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t            cmp_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t            msg_ring_pages_pa[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
    uint64_t            consumed_ptr;
    uint64_t            filled_cmp_ptr;
    uint64_t            filled_msg_ptr;
} PVSCSIRingInfo;

typedef struct PVSCSISGState {
    hwaddr elemAddr;
    hwaddr dataAddr;
    uint32_t resid;
} PVSCSISGState;

typedef QTAILQ_HEAD(, PVSCSIRequest) PVSCSIRequestList;

struct PVSCSIState {
    PCIDevice parent_obj;
    MemoryRegion io_space;
    SCSIBus bus;
    QEMUBH *completion_worker;
    PVSCSIRequestList pending_queue;
    PVSCSIRequestList completion_queue;

    uint64_t reg_interrupt_status;        /* Interrupt status register value */
    uint64_t reg_interrupt_enabled;       /* Interrupt mask register value   */
    uint64_t reg_command_status;          /* Command status register value   */

    /* Command data adoption mechanism */
    uint64_t curr_cmd;                   /* Last command arrived             */
    uint32_t curr_cmd_data_cntr;         /* Amount of data for last command  */

    /* Collector for current command data */
    uint32_t curr_cmd_data[PVSCSI_MAX_CMD_DATA_WORDS];

    uint8_t rings_info_valid;            /* Whether data rings initialized   */
    uint8_t msg_ring_info_valid;         /* Whether message ring initialized */
    uint8_t use_msg;                     /* Whether to use message ring      */

    uint8_t msi_used;                    /* For migration compatibility      */
    PVSCSIRingInfo rings;                /* Data transfer rings manager      */
    uint32_t resetting;                  /* Reset in progress                */

    uint32_t compat_flags;
};

typedef struct PVSCSIRequest {
    SCSIRequest *sreq;
    PVSCSIState *dev;
    uint8_t sense_key;
    uint8_t completed;
    int lun;
    QEMUSGList sgl;
    PVSCSISGState sg;
    struct PVSCSIRingReqDesc req;
    struct PVSCSIRingCmpDesc cmp;
    QTAILQ_ENTRY(PVSCSIRequest) next;
} PVSCSIRequest;

/* Integer binary logarithm */
static int
pvscsi_log2(uint32_t input)
{
    int log = 0;
    assert(input > 0);
    while (input >> ++log) {
    }
    return log;
}

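/*
 * Program the request/completion ring geometry described by the guest's
 * SETUP_RINGS command: record the ring state page and ring page addresses,
 * derive the index masks, and reset the producer/consumer indices in the
 * shared ring state page.
 */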
static void
pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
{
    int i;
    uint32_t txr_len_log2, rxr_len_log2;
    uint32_t req_ring_size, cmp_ring_size;
    m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT;

    req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
    cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    txr_len_log2 = pvscsi_log2(req_ring_size - 1);
    rxr_len_log2 = pvscsi_log2(cmp_ring_size - 1);

    m->txr_len_mask = MASK(txr_len_log2);
    m->rxr_len_mask = MASK(rxr_len_log2);

    m->consumed_ptr = 0;
    m->filled_cmp_ptr = 0;

    for (i = 0; i < ri->reqRingNumPages; i++) {
        m->req_ring_pages_pa[i] = ri->reqRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    for (i = 0; i < ri->cmpRingNumPages; i++) {
        m->cmp_ring_pages_pa[i] = ri->cmpRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    RS_SET_FIELD(m, reqProdIdx, 0);
    RS_SET_FIELD(m, reqConsIdx, 0);
    RS_SET_FIELD(m, reqNumEntriesLog2, txr_len_log2);

    RS_SET_FIELD(m, cmpProdIdx, 0);
    RS_SET_FIELD(m, cmpConsIdx, 0);
    RS_SET_FIELD(m, cmpNumEntriesLog2, rxr_len_log2);

    trace_pvscsi_ring_init_data(txr_len_log2, rxr_len_log2);

    /* Flush ring state page changes */
    smp_wmb();
}

static int
pvscsi_ring_init_msg(PVSCSIRingInfo *m, PVSCSICmdDescSetupMsgRing *ri)
{
    int i;
    uint32_t len_log2;
    uint32_t ring_size;

    if (!ri->numPages || ri->numPages > PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES) {
        return -1;
    }
    ring_size = ri->numPages * PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    len_log2 = pvscsi_log2(ring_size - 1);

    m->msg_len_mask = MASK(len_log2);

    m->filled_msg_ptr = 0;

    for (i = 0; i < ri->numPages; i++) {
        m->msg_ring_pages_pa[i] = ri->ringPPNs[i] << VMW_PAGE_SHIFT;
    }

    RS_SET_FIELD(m, msgProdIdx, 0);
    RS_SET_FIELD(m, msgConsIdx, 0);
    RS_SET_FIELD(m, msgNumEntriesLog2, len_log2);

    trace_pvscsi_ring_init_msg(len_log2);

    /* Flush ring state page changes */
    smp_wmb();

    return 0;
}

static void
pvscsi_ring_cleanup(PVSCSIRingInfo *mgr)
{
    mgr->rs_pa = 0;
    mgr->txr_len_mask = 0;
    mgr->rxr_len_mask = 0;
    mgr->msg_len_mask = 0;
    mgr->consumed_ptr = 0;
    mgr->filled_cmp_ptr = 0;
    mgr->filled_msg_ptr = 0;
    memset(mgr->req_ring_pages_pa, 0, sizeof(mgr->req_ring_pages_pa));
    memset(mgr->cmp_ring_pages_pa, 0, sizeof(mgr->cmp_ring_pages_pa));
    memset(mgr->msg_ring_pages_pa, 0, sizeof(mgr->msg_ring_pages_pa));
}

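/*
 * Return the guest physical address of the next request descriptor to
 * consume, or 0 if the request ring is empty or the producer index is out
 * of range.
 */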
static hwaddr
pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
{
    uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx);
    uint32_t ring_size = PVSCSI_MAX_NUM_PAGES_REQ_RING
                            * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;

    if (ready_ptr != mgr->consumed_ptr
        && ready_ptr - mgr->consumed_ptr < ring_size) {
        uint32_t next_ready_ptr =
            mgr->consumed_ptr++ & mgr->txr_len_mask;
        uint32_t next_ready_page =
            next_ready_ptr / PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
        uint32_t inpage_idx =
            next_ready_ptr % PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;

        return mgr->req_ring_pages_pa[next_ready_page] +
               inpage_idx * sizeof(PVSCSIRingReqDesc);
    } else {
        return 0;
    }
}

static void
pvscsi_ring_flush_req(PVSCSIRingInfo *mgr)
{
    RS_SET_FIELD(mgr, reqConsIdx, mgr->consumed_ptr);
}

static hwaddr
pvscsi_ring_pop_cmp_descr(PVSCSIRingInfo *mgr)
{
    /*
     * The Linux driver explicitly verifies that the number of requests
     * being processed by the device is less than the size of the
     * completion queue, so the device may omit the completion queue
     * overflow check. We assume that this is true for other (Windows)
     * drivers as well.
     */

    uint32_t free_cmp_ptr =
        mgr->filled_cmp_ptr++ & mgr->rxr_len_mask;
    uint32_t free_cmp_page =
        free_cmp_ptr / PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    uint32_t inpage_idx =
        free_cmp_ptr % PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    return mgr->cmp_ring_pages_pa[free_cmp_page] +
           inpage_idx * sizeof(PVSCSIRingCmpDesc);
}

static hwaddr
pvscsi_ring_pop_msg_descr(PVSCSIRingInfo *mgr)
{
    uint32_t free_msg_ptr =
        mgr->filled_msg_ptr++ & mgr->msg_len_mask;
    uint32_t free_msg_page =
        free_msg_ptr / PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    uint32_t inpage_idx =
        free_msg_ptr % PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    return mgr->msg_ring_pages_pa[free_msg_page] +
           inpage_idx * sizeof(PVSCSIRingMsgDesc);
}

static void
pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_cmp(mgr->filled_cmp_ptr);

    RS_SET_FIELD(mgr, cmpProdIdx, mgr->filled_cmp_ptr);
}

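/*
 * Check whether the message ring has a free slot, i.e. the producer index
 * has not run a full ring length ahead of the guest's consumer index.
 */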
static bool
pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr)
{
    uint32_t prodIdx = RS_GET_FIELD(mgr, msgProdIdx);
    uint32_t consIdx = RS_GET_FIELD(mgr, msgConsIdx);

    return (prodIdx - consIdx) < (mgr->msg_len_mask + 1);
}

static void
pvscsi_ring_flush_msg(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_msg(mgr->filled_msg_ptr);

    RS_SET_FIELD(mgr, msgProdIdx, mgr->filled_msg_ptr);
}

static void
pvscsi_reset_state(PVSCSIState *s)
{
    s->curr_cmd = PVSCSI_CMD_FIRST;
    s->curr_cmd_data_cntr = 0;
    s->reg_command_status = PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    s->reg_interrupt_status = 0;
    pvscsi_ring_cleanup(&s->rings);
    s->rings_info_valid = FALSE;
    s->msg_ring_info_valid = FALSE;
    QTAILQ_INIT(&s->pending_queue);
    QTAILQ_INIT(&s->completion_queue);
}

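/*
 * Recompute the interrupt line from (status & mask): deliver an MSI message
 * when MSI is enabled, otherwise drive the legacy INTx level.
 */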
static void
pvscsi_update_irq_status(PVSCSIState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    bool should_raise = s->reg_interrupt_enabled & s->reg_interrupt_status;

    trace_pvscsi_update_irq_level(should_raise, s->reg_interrupt_enabled,
                                  s->reg_interrupt_status);

    if (msi_enabled(d)) {
        if (should_raise) {
            trace_pvscsi_update_irq_msi();
            msi_notify(d, PVSCSI_VECTOR_COMPLETION);
        }
        return;
    }

    pci_set_irq(d, !!should_raise);
}

static void
pvscsi_raise_completion_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_CMPL_0;

    /* Memory barrier to flush interrupt status register changes */
    smp_wmb();

    pvscsi_update_irq_status(s);
}

static void
pvscsi_raise_message_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_MSG_0;

    /* Memory barrier to flush interrupt status register changes */
    smp_wmb();

    pvscsi_update_irq_status(s);
}

static void
pvscsi_cmp_ring_put(PVSCSIState *s, struct PVSCSIRingCmpDesc *cmp_desc)
{
    hwaddr cmp_descr_pa;

    cmp_descr_pa = pvscsi_ring_pop_cmp_descr(&s->rings);
    trace_pvscsi_cmp_ring_put(cmp_descr_pa);
    cpu_physical_memory_write(cmp_descr_pa, cmp_desc, sizeof(*cmp_desc));
}

static void
pvscsi_msg_ring_put(PVSCSIState *s, struct PVSCSIRingMsgDesc *msg_desc)
{
    hwaddr msg_descr_pa;

    msg_descr_pa = pvscsi_ring_pop_msg_descr(&s->rings);
    trace_pvscsi_msg_ring_put(msg_descr_pa);
    cpu_physical_memory_write(msg_descr_pa, msg_desc, sizeof(*msg_desc));
}

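/*
 * Bottom-half handler (s->completion_worker): drain the completion queue
 * onto the completion ring and raise the completion interrupt if anything
 * was posted.
 */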
static void
pvscsi_process_completion_queue(void *opaque)
{
    PVSCSIState *s = opaque;
    PVSCSIRequest *pvscsi_req;
    bool has_completed = false;

    while (!QTAILQ_EMPTY(&s->completion_queue)) {
        pvscsi_req = QTAILQ_FIRST(&s->completion_queue);
        QTAILQ_REMOVE(&s->completion_queue, pvscsi_req, next);
        pvscsi_cmp_ring_put(s, &pvscsi_req->cmp);
        g_free(pvscsi_req);
        has_completed = true;
    }

    if (has_completed) {
        pvscsi_ring_flush_cmp(&s->rings);
        pvscsi_raise_completion_interrupt(s);
    }
}

static void
pvscsi_reset_adapter(PVSCSIState *s)
{
    s->resetting++;
    qbus_reset_all(BUS(&s->bus));
    s->resetting--;
    pvscsi_process_completion_queue(s);
    assert(QTAILQ_EMPTY(&s->pending_queue));
    pvscsi_reset_state(s);
}

static void
pvscsi_schedule_completion_processing(PVSCSIState *s)
{
    /* Try putting more complete requests on the ring. */
    if (!QTAILQ_EMPTY(&s->completion_queue)) {
        qemu_bh_schedule(s->completion_worker);
    }
}

static void
pvscsi_complete_request(PVSCSIState *s, PVSCSIRequest *r)
{
    assert(!r->completed);

    trace_pvscsi_complete_request(r->cmp.context, r->cmp.dataLen,
                                  r->sense_key);
    if (r->sreq != NULL) {
        scsi_req_unref(r->sreq);
        r->sreq = NULL;
    }
    r->completed = 1;
    QTAILQ_REMOVE(&s->pending_queue, r, next);
    QTAILQ_INSERT_TAIL(&s->completion_queue, r, next);
    pvscsi_schedule_completion_processing(s);
}

static QEMUSGList *pvscsi_get_sg_list(SCSIRequest *r)
{
    PVSCSIRequest *req = r->hba_private;

    trace_pvscsi_get_sg_list(req->sgl.nsg, req->sgl.size);

    return &req->sgl;
}

static void
pvscsi_get_next_sg_elem(PVSCSISGState *sg)
{
    struct PVSCSISGElement elem;

    cpu_physical_memory_read(sg->elemAddr, &elem, sizeof(elem));
    if ((elem.flags & ~PVSCSI_KNOWN_FLAGS) != 0) {
        /*
         * The PVSCSI_SGE_FLAG_CHAIN_ELEMENT flag is described in the header
         * file but its value is unknown. This flag requires additional
         * processing, so we emit a warning here to catch it some day and
         * implement it properly.
         */
        trace_pvscsi_get_next_sg_elem(elem.flags);
    }

    sg->elemAddr += sizeof(elem);
    sg->dataAddr = elem.addr;
    sg->resid = elem.length;
}

static void
pvscsi_write_sense(PVSCSIRequest *r, uint8_t *sense, int len)
{
    r->cmp.senseLen = MIN(r->req.senseLen, len);
    r->sense_key = sense[(sense[0] & 2) ? 1 : 2];
    cpu_physical_memory_write(r->req.senseAddr, sense, r->cmp.senseLen);
}

static void
pvscsi_command_failed(SCSIRequest *req)
{
    PVSCSIRequest *pvscsi_req = req->hba_private;
    PVSCSIState *s;

    if (!pvscsi_req) {
        trace_pvscsi_command_complete_not_found(req->tag);
        return;
    }
    s = pvscsi_req->dev;

    switch (req->host_status) {
    case SCSI_HOST_NO_LUN:
        pvscsi_req->cmp.hostStatus = BTSTAT_LUNMISMATCH;
        break;
    case SCSI_HOST_BUSY:
        pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE;
        break;
    case SCSI_HOST_TIME_OUT:
    case SCSI_HOST_ABORTED:
        pvscsi_req->cmp.hostStatus = BTSTAT_SENTRST;
        break;
    case SCSI_HOST_BAD_RESPONSE:
        pvscsi_req->cmp.hostStatus = BTSTAT_SELTIMEO;
        break;
    case SCSI_HOST_RESET:
        pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET;
        break;
    default:
        pvscsi_req->cmp.hostStatus = BTSTAT_HASOFTWARE;
        break;
    }
    pvscsi_req->cmp.scsiStatus = GOOD;
    qemu_sglist_destroy(&pvscsi_req->sgl);
    pvscsi_complete_request(s, pvscsi_req);
}

static void
pvscsi_command_complete(SCSIRequest *req, size_t resid)
{
    PVSCSIRequest *pvscsi_req = req->hba_private;
    PVSCSIState *s;

    if (!pvscsi_req) {
        trace_pvscsi_command_complete_not_found(req->tag);
        return;
    }
    s = pvscsi_req->dev;

    if (resid) {
        /* Short transfer.  */
        trace_pvscsi_command_complete_data_run();
        pvscsi_req->cmp.hostStatus = BTSTAT_DATARUN;
    }

    pvscsi_req->cmp.scsiStatus = req->status;
    if (pvscsi_req->cmp.scsiStatus == CHECK_CONDITION) {
        uint8_t sense[SCSI_SENSE_BUF_SIZE];
        int sense_len =
            scsi_req_get_sense(pvscsi_req->sreq, sense, sizeof(sense));

        trace_pvscsi_command_complete_sense_len(sense_len);
        pvscsi_write_sense(pvscsi_req, sense, sense_len);
    }
    qemu_sglist_destroy(&pvscsi_req->sgl);
    pvscsi_complete_request(s, pvscsi_req);
}

static void
pvscsi_send_msg(PVSCSIState *s, SCSIDevice *dev, uint32_t msg_type)
{
    if (s->msg_ring_info_valid && pvscsi_ring_msg_has_room(&s->rings)) {
        PVSCSIMsgDescDevStatusChanged msg = {0};

        msg.type = msg_type;
        msg.bus = dev->channel;
        msg.target = dev->id;
        msg.lun[1] = dev->lun;

        pvscsi_msg_ring_put(s, (PVSCSIRingMsgDesc *)&msg);
        pvscsi_ring_flush_msg(&s->rings);
        pvscsi_raise_message_interrupt(s);
    }
}

static void
pvscsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(hotplug_dev);

    pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_ADDED);
}

static void
pvscsi_hot_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(hotplug_dev);

    pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_REMOVED);
    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
}

static void
pvscsi_request_cancelled(SCSIRequest *req)
{
    PVSCSIRequest *pvscsi_req = req->hba_private;
    PVSCSIState *s = pvscsi_req->dev;

    if (pvscsi_req->completed) {
        return;
    }

    if (pvscsi_req->dev->resetting) {
        pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET;
    } else {
        pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE;
    }

    pvscsi_complete_request(s, pvscsi_req);
}

static SCSIDevice*
pvscsi_device_find(PVSCSIState *s, int channel, int target,
                   uint8_t *requested_lun, uint8_t *target_lun)
{
    if (requested_lun[0] || requested_lun[2] || requested_lun[3] ||
        requested_lun[4] || requested_lun[5] || requested_lun[6] ||
        requested_lun[7] || (target > PVSCSI_MAX_DEVS)) {
        return NULL;
    } else {
        *target_lun = requested_lun[1];
        return scsi_device_find(&s->bus, channel, target, *target_lun);
    }
}

static PVSCSIRequest *
pvscsi_queue_pending_descriptor(PVSCSIState *s, SCSIDevice **d,
                                struct PVSCSIRingReqDesc *descr)
{
    PVSCSIRequest *pvscsi_req;
    uint8_t lun;

    pvscsi_req = g_malloc0(sizeof(*pvscsi_req));
    pvscsi_req->dev = s;
    pvscsi_req->req = *descr;
    pvscsi_req->cmp.context = pvscsi_req->req.context;
    QTAILQ_INSERT_TAIL(&s->pending_queue, pvscsi_req, next);

    *d = pvscsi_device_find(s, descr->bus, descr->target, descr->lun, &lun);
    if (*d) {
        pvscsi_req->lun = lun;
    }

    return pvscsi_req;
}

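/*
 * Walk the guest's scatter/gather element list and translate it into the
 * request's QEMUSGList, stopping once the requested data length is covered
 * or PVSCSI_MAX_SG_ELEM elements have been examined.
 */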
static void
pvscsi_convert_sglist(PVSCSIRequest *r)
{
    uint32_t chunk_size, elmcnt = 0;
    uint64_t data_length = r->req.dataLen;
    PVSCSISGState sg = r->sg;
    while (data_length && elmcnt < PVSCSI_MAX_SG_ELEM) {
        while (!sg.resid && elmcnt++ < PVSCSI_MAX_SG_ELEM) {
            pvscsi_get_next_sg_elem(&sg);
            trace_pvscsi_convert_sglist(r->req.context, r->sg.dataAddr,
                                        r->sg.resid);
        }
        chunk_size = MIN(data_length, sg.resid);
        if (chunk_size) {
            qemu_sglist_add(&r->sgl, sg.dataAddr, chunk_size);
        }

        sg.dataAddr += chunk_size;
        data_length -= chunk_size;
        sg.resid -= chunk_size;
    }
}

static void
pvscsi_build_sglist(PVSCSIState *s, PVSCSIRequest *r)
{
    PCIDevice *d = PCI_DEVICE(s);

    pci_dma_sglist_init(&r->sgl, d, 1);
    if (r->req.flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        pvscsi_convert_sglist(r);
    } else {
        qemu_sglist_add(&r->sgl, r->req.dataAddr, r->req.dataLen);
    }
}

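/*
 * Handle one request descriptor: locate the target device, reject requests
 * whose direction flags contradict the CDB transfer direction, build the
 * DMA scatter/gather list and hand the request to the SCSI layer.
 */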
static void
pvscsi_process_request_descriptor(PVSCSIState *s,
                                  struct PVSCSIRingReqDesc *descr)
{
    SCSIDevice *d;
    PVSCSIRequest *r = pvscsi_queue_pending_descriptor(s, &d, descr);
    int64_t n;

    trace_pvscsi_process_req_descr(descr->cdb[0], descr->context);

    if (!d) {
        r->cmp.hostStatus = BTSTAT_SELTIMEO;
        trace_pvscsi_process_req_descr_unknown_device();
        pvscsi_complete_request(s, r);
        return;
    }

    if (descr->flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        r->sg.elemAddr = descr->dataAddr;
    }

    r->sreq = scsi_req_new(d, descr->context, r->lun, descr->cdb, r);
    if (r->sreq->cmd.mode == SCSI_XFER_FROM_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TODEVICE)) {
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }
    if (r->sreq->cmd.mode == SCSI_XFER_TO_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TOHOST)) {
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }

    pvscsi_build_sglist(s, r);
    n = scsi_req_enqueue(r->sreq);

    if (n) {
        scsi_req_continue(r->sreq);
    }
}

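/*
 * Drain the request ring: read each pending descriptor from guest memory,
 * submit it, and publish the updated consumer index when done.
 */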
static void
pvscsi_process_io(PVSCSIState *s)
{
    PVSCSIRingReqDesc descr;
    hwaddr next_descr_pa;

    if (!s->rings_info_valid) {
        return;
    }

    while ((next_descr_pa = pvscsi_ring_pop_req_descr(&s->rings)) != 0) {

        /* Only read after production index verification */
        smp_rmb();

        trace_pvscsi_process_io(next_descr_pa);
        cpu_physical_memory_read(next_descr_pa, &descr, sizeof(descr));
        pvscsi_process_request_descriptor(s, &descr);
    }

    pvscsi_ring_flush_req(&s->rings);
}

static void
pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings *rc)
{
    int i;
    trace_pvscsi_tx_rings_ppn("Rings State", rc->ringsStatePPN);

    trace_pvscsi_tx_rings_num_pages("Request Ring", rc->reqRingNumPages);
    for (i = 0; i < rc->reqRingNumPages; i++) {
        trace_pvscsi_tx_rings_ppn("Request Ring", rc->reqRingPPNs[i]);
    }

    trace_pvscsi_tx_rings_num_pages("Confirm Ring", rc->cmpRingNumPages);
    for (i = 0; i < rc->cmpRingNumPages; i++) {
        trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->cmpRingPPNs[i]);
    }
}

static uint64_t
pvscsi_on_cmd_config(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_CONFIG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_cmd_unplug(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_DEVICE_UNPLUG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_issue_scsi(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_ISSUE_SCSI");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_cmd_setup_rings(PVSCSIState *s)
{
    PVSCSICmdDescSetupRings *rc =
        (PVSCSICmdDescSetupRings *) s->curr_cmd_data;

    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_RINGS");

    if (!rc->reqRingNumPages
        || rc->reqRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
        || !rc->cmpRingNumPages
        || rc->cmpRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES) {
        return PVSCSI_COMMAND_PROCESSING_FAILED;
    }

    pvscsi_dbg_dump_tx_rings_config(rc);
    pvscsi_ring_init_data(&s->rings, rc);

    s->rings_info_valid = TRUE;
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}

static uint64_t
pvscsi_on_cmd_abort(PVSCSIState *s)
{
    PVSCSICmdDescAbortCmd *cmd = (PVSCSICmdDescAbortCmd *) s->curr_cmd_data;
    PVSCSIRequest *r, *next;

    trace_pvscsi_on_cmd_abort(cmd->context, cmd->target);

    QTAILQ_FOREACH_SAFE(r, &s->pending_queue, next, next) {
        if (r->req.context == cmd->context) {
            break;
        }
    }
    if (r) {
        assert(!r->completed);
        r->cmp.hostStatus = BTSTAT_ABORTQUEUE;
        scsi_req_cancel(r->sreq);
    }

    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}

static uint64_t
pvscsi_on_cmd_unknown(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_unknown_data(s->curr_cmd_data[0]);
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_cmd_reset_device(PVSCSIState *s)
{
    uint8_t target_lun = 0;
    struct PVSCSICmdDescResetDevice *cmd =
        (struct PVSCSICmdDescResetDevice *) s->curr_cmd_data;
    SCSIDevice *sdev;

    sdev = pvscsi_device_find(s, 0, cmd->target, cmd->lun, &target_lun);

    trace_pvscsi_on_cmd_reset_dev(cmd->target, (int) target_lun, sdev);

    if (sdev != NULL) {
        s->resetting++;
        device_legacy_reset(&sdev->qdev);
        s->resetting--;
        return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    }

    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_cmd_reset_bus(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_RESET_BUS");

    s->resetting++;
    qbus_reset_all(BUS(&s->bus));
    s->resetting--;
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}

static uint64_t
pvscsi_on_cmd_setup_msg_ring(PVSCSIState *s)
{
    PVSCSICmdDescSetupMsgRing *rc =
        (PVSCSICmdDescSetupMsgRing *) s->curr_cmd_data;

    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_MSG_RING");

    if (!s->use_msg) {
        return PVSCSI_COMMAND_PROCESSING_FAILED;
    }

    if (s->rings_info_valid) {
        if (pvscsi_ring_init_msg(&s->rings, rc) < 0) {
            return PVSCSI_COMMAND_PROCESSING_FAILED;
        }
        s->msg_ring_info_valid = TRUE;
    }
    return sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(uint32_t);
}

static uint64_t
pvscsi_on_cmd_adapter_reset(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_ADAPTER_RESET");

    pvscsi_reset_adapter(s);
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}

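/*
 * Dispatch table for commands written to PVSCSI_REG_OFFSET_COMMAND.
 * data_size is the number of bytes of command data that must arrive via
 * PVSCSI_REG_OFFSET_COMMAND_DATA before the handler is invoked.
 */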
static const struct {
    int       data_size;
    uint64_t  (*handler_fn)(PVSCSIState *s);
} pvscsi_commands[] = {
    [PVSCSI_CMD_FIRST] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unknown,
    },

    /* Not implemented; data size determined from what arrives on Windows */
    [PVSCSI_CMD_CONFIG] = {
        .data_size = 6 * sizeof(uint32_t),
        .handler_fn = pvscsi_on_cmd_config,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_ISSUE_SCSI] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_issue_scsi,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_DEVICE_UNPLUG] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unplug,
    },

    [PVSCSI_CMD_SETUP_RINGS] = {
        .data_size = sizeof(PVSCSICmdDescSetupRings),
        .handler_fn = pvscsi_on_cmd_setup_rings,
    },

    [PVSCSI_CMD_RESET_DEVICE] = {
        .data_size = sizeof(struct PVSCSICmdDescResetDevice),
        .handler_fn = pvscsi_on_cmd_reset_device,
    },

    [PVSCSI_CMD_RESET_BUS] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_reset_bus,
    },

    [PVSCSI_CMD_SETUP_MSG_RING] = {
        .data_size = sizeof(PVSCSICmdDescSetupMsgRing),
        .handler_fn = pvscsi_on_cmd_setup_msg_ring,
    },

    [PVSCSI_CMD_ADAPTER_RESET] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_adapter_reset,
    },

    [PVSCSI_CMD_ABORT_CMD] = {
        .data_size = sizeof(struct PVSCSICmdDescAbortCmd),
        .handler_fn = pvscsi_on_cmd_abort,
    },
};

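/*
 * Run the current command's handler once enough data words have arrived;
 * commands whose data_size is zero execute immediately when the command
 * register is written.
 */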
static void
pvscsi_do_command_processing(PVSCSIState *s)
{
    size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);

    assert(s->curr_cmd < PVSCSI_CMD_LAST);
    if (bytes_arrived >= pvscsi_commands[s->curr_cmd].data_size) {
        s->reg_command_status = pvscsi_commands[s->curr_cmd].handler_fn(s);
        s->curr_cmd = PVSCSI_CMD_FIRST;
        s->curr_cmd_data_cntr = 0;
    }
}

static void
pvscsi_on_command_data(PVSCSIState *s, uint32_t value)
{
    size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);

    assert(bytes_arrived < sizeof(s->curr_cmd_data));
    s->curr_cmd_data[s->curr_cmd_data_cntr++] = value;

    pvscsi_do_command_processing(s);
}

static void
pvscsi_on_command(PVSCSIState *s, uint64_t cmd_id)
{
    if ((cmd_id > PVSCSI_CMD_FIRST) && (cmd_id < PVSCSI_CMD_LAST)) {
        s->curr_cmd = cmd_id;
    } else {
        s->curr_cmd = PVSCSI_CMD_FIRST;
        trace_pvscsi_on_cmd_unknown(cmd_id);
    }

    s->curr_cmd_data_cntr = 0;
    s->reg_command_status = PVSCSI_COMMAND_NOT_ENOUGH_DATA;

    pvscsi_do_command_processing(s);
}

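/*
 * MMIO write handler for the registers in BAR 0. The guest issues a command
 * by writing its id to the COMMAND register, then streaming the command's
 * data words through COMMAND_DATA; it rings the doorbell with the KICK
 * registers and acknowledges interrupts by writing to INTR_STATUS.
 */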
static void
pvscsi_io_write(void *opaque, hwaddr addr,
                uint64_t val, unsigned size)
{
    PVSCSIState *s = opaque;

    switch (addr) {
    case PVSCSI_REG_OFFSET_COMMAND:
        pvscsi_on_command(s, val);
        break;

    case PVSCSI_REG_OFFSET_COMMAND_DATA:
        pvscsi_on_command_data(s, (uint32_t) val);
        break;

    case PVSCSI_REG_OFFSET_INTR_STATUS:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_STATUS", val);
        s->reg_interrupt_status &= ~val;
        pvscsi_update_irq_status(s);
        pvscsi_schedule_completion_processing(s);
        break;

    case PVSCSI_REG_OFFSET_INTR_MASK:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_MASK", val);
        s->reg_interrupt_enabled = val;
        pvscsi_update_irq_status(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_NON_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_NON_RW_IO", val);
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_RW_IO", val);
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_DEBUG:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_DEBUG", val);
        break;

    default:
        trace_pvscsi_io_write_unknown(addr, size, val);
        break;
    }
}

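/* MMIO read handler: interrupt status, interrupt mask and command status. */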
static uint64_t
pvscsi_io_read(void *opaque, hwaddr addr, unsigned size)
{
    PVSCSIState *s = opaque;

    switch (addr) {
    case PVSCSI_REG_OFFSET_INTR_STATUS:
        trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_STATUS",
                             s->reg_interrupt_status);
        return s->reg_interrupt_status;

    case PVSCSI_REG_OFFSET_INTR_MASK:
        trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_MASK",
                             s->reg_interrupt_status);
        return s->reg_interrupt_enabled;

    case PVSCSI_REG_OFFSET_COMMAND_STATUS:
        trace_pvscsi_io_read("PVSCSI_REG_OFFSET_COMMAND_STATUS",
                             s->reg_interrupt_status);
        return s->reg_command_status;

    default:
        trace_pvscsi_io_read_unknown(addr, size);
        return 0;
    }
}

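/*
 * Enable a single MSI vector; on failure the device silently falls back to
 * legacy INTx. msi_used is only recorded for migration compatibility;
 * interrupt delivery checks msi_enabled() at run time.
 */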
static void
pvscsi_init_msi(PVSCSIState *s)
{
    int res;
    PCIDevice *d = PCI_DEVICE(s);

    res = msi_init(d, PVSCSI_MSI_OFFSET(s), PVSCSI_MSIX_NUM_VECTORS,
                   PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK, NULL);
    if (res < 0) {
        trace_pvscsi_init_msi_fail(res);
        s->msi_used = false;
    } else {
        s->msi_used = true;
    }
}

static void
pvscsi_cleanup_msi(PVSCSIState *s)
{
    PCIDevice *d = PCI_DEVICE(s);

    msi_uninit(d);
}

static const MemoryRegionOps pvscsi_ops = {
        .read = pvscsi_io_read,
        .write = pvscsi_io_write,
        .endianness = DEVICE_LITTLE_ENDIAN,
        .impl = {
                .min_access_size = 4,
                .max_access_size = 4,
        },
};

static const struct SCSIBusInfo pvscsi_scsi_info = {
        .tcq = true,
        .max_target = PVSCSI_MAX_DEVS,
        .max_channel = 0,
        .max_lun = 0,

        .get_sg_list = pvscsi_get_sg_list,
        .complete = pvscsi_command_complete,
        .cancel = pvscsi_request_cancelled,
        .fail = pvscsi_command_failed,
};

static void
pvscsi_realizefn(PCIDevice *pci_dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(pci_dev);

    trace_pvscsi_state("init");

    /* PCI subsystem ID, subsystem vendor ID, revision */
    if (PVSCSI_USE_OLD_PCI_CONFIGURATION(s)) {
        pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 0x1000);
    } else {
        pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                     PCI_VENDOR_ID_VMWARE);
        pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                     PCI_DEVICE_ID_VMWARE_PVSCSI);
        pci_config_set_revision(pci_dev->config, 0x2);
    }

    /* PCI latency timer = 255 */
    pci_dev->config[PCI_LATENCY_TIMER] = 0xff;

    /* Interrupt pin A */
    pci_config_set_interrupt_pin(pci_dev->config, 1);

    memory_region_init_io(&s->io_space, OBJECT(s), &pvscsi_ops, s,
                          "pvscsi-io", PVSCSI_MEM_SPACE_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->io_space);

    pvscsi_init_msi(s);

    if (pci_is_express(pci_dev) && pci_bus_is_express(pci_get_bus(pci_dev))) {
        pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET);
    }

    s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s);

    scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info);
    /* Override the default SCSI bus hotplug handler with pvscsi's own */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(s));
    pvscsi_reset_state(s);
}

static void
pvscsi_uninit(PCIDevice *pci_dev)
{
    PVSCSIState *s = PVSCSI(pci_dev);

    trace_pvscsi_state("uninit");
    qemu_bh_delete(s->completion_worker);

    pvscsi_cleanup_msi(s);
}

static void
pvscsi_reset(DeviceState *dev)
{
    PCIDevice *d = PCI_DEVICE(dev);
    PVSCSIState *s = PVSCSI(d);

    trace_pvscsi_state("reset");
    pvscsi_reset_adapter(s);
}

static int
pvscsi_pre_save(void *opaque)
{
    PVSCSIState *s = (PVSCSIState *) opaque;

    trace_pvscsi_state("presave");

    assert(QTAILQ_EMPTY(&s->pending_queue));
    assert(QTAILQ_EMPTY(&s->completion_queue));

    return 0;
}

static int
pvscsi_post_load(void *opaque, int version_id)
{
    trace_pvscsi_state("postload");
    return 0;
}

static bool pvscsi_vmstate_need_pcie_device(void *opaque)
{
    PVSCSIState *s = PVSCSI(opaque);

    return !(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE);
}

static bool pvscsi_vmstate_test_pci_device(void *opaque, int version_id)
{
    return !pvscsi_vmstate_need_pcie_device(opaque);
}

static const VMStateDescription vmstate_pvscsi_pcie_device = {
    .name = "pvscsi/pcie",
    .needed = pvscsi_vmstate_need_pcie_device,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pvscsi = {
    .name = "pvscsi",
    .version_id = 0,
    .minimum_version_id = 0,
    .pre_save = pvscsi_pre_save,
    .post_load = pvscsi_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_TEST(parent_obj, PVSCSIState,
                            pvscsi_vmstate_test_pci_device, 0,
                            vmstate_pci_device, PCIDevice),
        VMSTATE_UINT8(msi_used, PVSCSIState),
        VMSTATE_UINT32(resetting, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_status, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_enabled, PVSCSIState),
        VMSTATE_UINT64(reg_command_status, PVSCSIState),
        VMSTATE_UINT64(curr_cmd, PVSCSIState),
        VMSTATE_UINT32(curr_cmd_data_cntr, PVSCSIState),
        VMSTATE_UINT32_ARRAY(curr_cmd_data, PVSCSIState,
                             ARRAY_SIZE(((PVSCSIState *)NULL)->curr_cmd_data)),
        VMSTATE_UINT8(rings_info_valid, PVSCSIState),
        VMSTATE_UINT8(msg_ring_info_valid, PVSCSIState),
        VMSTATE_UINT8(use_msg, PVSCSIState),

        VMSTATE_UINT64(rings.rs_pa, PVSCSIState),
        VMSTATE_UINT32(rings.txr_len_mask, PVSCSIState),
        VMSTATE_UINT32(rings.rxr_len_mask, PVSCSIState),
        VMSTATE_UINT64_ARRAY(rings.req_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64_ARRAY(rings.cmp_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64(rings.consumed_ptr, PVSCSIState),
        VMSTATE_UINT64(rings.filled_cmp_ptr, PVSCSIState),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pvscsi_pcie_device,
        NULL
    }
};

static Property pvscsi_properties[] = {
    DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1),
    DEFINE_PROP_BIT("x-old-pci-configuration", PVSCSIState, compat_flags,
                    PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", PVSCSIState, compat_flags,
                    PVSCSI_COMPAT_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void pvscsi_realize(DeviceState *qdev, Error **errp)
{
    PVSCSIClass *pvs_c = PVSCSI_GET_CLASS(qdev);
    PCIDevice *pci_dev = PCI_DEVICE(qdev);
    PVSCSIState *s = PVSCSI(qdev);

    if (!(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    pvs_c->parent_dc_realize(qdev, errp);
}

static void pvscsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    PVSCSIClass *pvs_k = PVSCSI_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->realize = pvscsi_realizefn;
    k->exit = pvscsi_uninit;
    k->vendor_id = PCI_VENDOR_ID_VMWARE;
    k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    k->subsystem_id = 0x1000;
    device_class_set_parent_realize(dc, pvscsi_realize,
                                    &pvs_k->parent_dc_realize);
    dc->reset = pvscsi_reset;
    dc->vmsd = &vmstate_pvscsi;
    device_class_set_props(dc, pvscsi_properties);
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    hc->unplug = pvscsi_hot_unplug;
    hc->plug = pvscsi_hotplug;
}

static const TypeInfo pvscsi_info = {
    .name          = TYPE_PVSCSI,
    .parent        = TYPE_PCI_DEVICE,
    .class_size    = sizeof(PVSCSIClass),
    .instance_size = sizeof(PVSCSIState),
    .class_init    = pvscsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void
pvscsi_register_types(void)
{
    type_register_static(&pvscsi_info);
}

type_init(pvscsi_register_types);