qemu/hw/scsi/vmw_pvscsi.c
<<
>>
Prefs
   1/*
   2 * QEMU VMWARE PVSCSI paravirtual SCSI bus
   3 *
   4 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
   5 *
   6 * Developed by Daynix Computing LTD (http://www.daynix.com)
   7 *
   8 * Based on implementation by Paolo Bonzini
   9 * http://lists.gnu.org/archive/html/qemu-devel/2011-08/msg00729.html
  10 *
  11 * Authors:
  12 * Paolo Bonzini <pbonzini@redhat.com>
  13 * Dmitry Fleytman <dmitry@daynix.com>
  14 * Yan Vugenfirer <yan@daynix.com>
  15 *
  16 * This work is licensed under the terms of the GNU GPL, version 2.
  17 * See the COPYING file in the top-level directory.
  18 *
  19 * NOTE about MSI-X:
  20 * MSI-X support has been removed for the moment because it leads Windows OS
  21 * to crash on startup. The crash happens because Windows driver requires
  22 * MSI-X shared memory to be part of the same BAR used for rings state
  23 * registers, etc. This is not supported by QEMU infrastructure so separate
  24 * BAR created from MSI-X purposes. Windows driver fails to deal with 2 BARs.
  25 *
  26 */
  27
  28#include "hw/scsi/scsi.h"
  29#include <block/scsi.h>
  30#include "hw/pci/msi.h"
  31#include "vmw_pvscsi.h"
  32#include "trace.h"
  33
  34
  35#define PVSCSI_MSI_OFFSET        (0x50)
  36#define PVSCSI_USE_64BIT         (true)
  37#define PVSCSI_PER_VECTOR_MASK   (false)
  38
  39#define PVSCSI_MAX_DEVS                   (64)
  40#define PVSCSI_MSIX_NUM_VECTORS           (1)
  41
  42#define PVSCSI_MAX_CMD_DATA_WORDS \
  43    (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))
  44
  45#define RS_GET_FIELD(rs_pa, field) \
  46    (ldl_le_phys(rs_pa + offsetof(struct PVSCSIRingsState, field)))
  47#define RS_SET_FIELD(rs_pa, field, val) \
  48    (stl_le_phys(rs_pa + offsetof(struct PVSCSIRingsState, field), val))
  49
  50#define TYPE_PVSCSI "pvscsi"
  51#define PVSCSI(obj) OBJECT_CHECK(PVSCSIState, (obj), TYPE_PVSCSI)
  52
/*
 * Ring bookkeeping for one adapter: guest-physical addresses of the shared
 * rings-state page and of every request/completion/message ring page, plus
 * device-private free-running counters whose masked low bits form the
 * actual ring indices.
 */
typedef struct PVSCSIRingInfo {
    uint64_t            rs_pa;          /* Guest PA of PVSCSIRingsState page */
    uint32_t            txr_len_mask;   /* Request ring index mask           */
    uint32_t            rxr_len_mask;   /* Completion ring index mask        */
    uint32_t            msg_len_mask;   /* Message ring index mask           */
    uint64_t            req_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t            cmp_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t            msg_ring_pages_pa[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
    uint64_t            consumed_ptr;   /* Requests taken off the req ring   */
    uint64_t            filled_cmp_ptr; /* Completions put on the cmp ring   */
    uint64_t            filled_msg_ptr; /* Messages put on the msg ring      */
} PVSCSIRingInfo;
  65
/* Cursor over a guest scatter-gather list (PVSCSI_FLAG_CMD_WITH_SG_LIST). */
typedef struct PVSCSISGState {
    hwaddr elemAddr;    /* Guest PA of the next PVSCSISGElement to fetch */
    hwaddr dataAddr;    /* Guest PA of the current element's data        */
    uint32_t resid;     /* Bytes remaining in the current element        */
} PVSCSISGState;
  71
  72typedef QTAILQ_HEAD(, PVSCSIRequest) PVSCSIRequestList;
  73
/* Device state for one PVSCSI adapter instance. */
typedef struct {
    PCIDevice parent_obj;
    MemoryRegion io_space;               /* Register BAR (pvscsi_ops)       */
    SCSIBus bus;
    QEMUBH *completion_worker;           /* Runs pvscsi_process_completion_queue */
    PVSCSIRequestList pending_queue;     /* Requests handed to the SCSI layer */
    PVSCSIRequestList completion_queue;  /* Requests awaiting ring delivery  */

    uint64_t reg_interrupt_status;        /* Interrupt status register value */
    uint64_t reg_interrupt_enabled;       /* Interrupt mask register value   */
    uint64_t reg_command_status;          /* Command status register value   */

    /* Command data adoption mechanism */
    uint64_t curr_cmd;                   /* Last command arrived             */
    uint32_t curr_cmd_data_cntr;         /* Amount of data for last command  */

    /* Collector for current command data */
    uint32_t curr_cmd_data[PVSCSI_MAX_CMD_DATA_WORDS];

    uint8_t rings_info_valid;            /* Whether data rings initialized   */
    uint8_t msg_ring_info_valid;         /* Whether message ring initialized */
    uint8_t use_msg;                     /* Whether to use message ring      */

    uint8_t msi_used;    /* Whether MSI support was installed successfully   */

    PVSCSIRingInfo rings;                /* Data transfer rings manager      */
    uint32_t resetting;                  /* Reset in progress                */
} PVSCSIState;
 102
/* Per-request tracking record, linked on the pending/completion queues. */
typedef struct PVSCSIRequest {
    SCSIRequest *sreq;              /* SCSI-layer request; NULL once released */
    PVSCSIState *dev;
    uint8_t sense_key;
    uint8_t completed;              /* Set when moved to the completion queue */
    int lun;
    QEMUSGList sgl;                 /* DMA list built from the descriptor     */
    PVSCSISGState sg;               /* Guest SG-list cursor                   */
    struct PVSCSIRingReqDesc req;   /* Descriptor copied from the request ring */
    struct PVSCSIRingCmpDesc cmp;   /* Completion descriptor under construction */
    QTAILQ_ENTRY(PVSCSIRequest) next;
} PVSCSIRequest;
 115
 116/* Integer binary logarithm */
 117static int
 118pvscsi_log2(uint32_t input)
 119{
 120    int log = 0;
 121    assert(input > 0);
 122    while (input >> ++log) {
 123    }
 124    return log;
 125}
 126
/*
 * Configure the request/completion rings from a SETUP_RINGS command:
 * record guest ring page addresses, derive index masks, and zero the
 * producer/consumer indices in the guest-visible rings-state page.
 */
static void
pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
{
    int i;
    uint32_t txr_len_log2, rxr_len_log2;
    uint32_t req_ring_size, cmp_ring_size;
    m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT;

    req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
    cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    /* log2(size - 1) gives the bit width of the largest valid index */
    txr_len_log2 = pvscsi_log2(req_ring_size - 1);
    rxr_len_log2 = pvscsi_log2(cmp_ring_size - 1);

    m->txr_len_mask = MASK(txr_len_log2);
    m->rxr_len_mask = MASK(rxr_len_log2);

    m->consumed_ptr = 0;
    m->filled_cmp_ptr = 0;

    /* NOTE(review): the guest-supplied page counts are not bounded here
     * against PVSCSI_SETUP_RINGS_MAX_NUM_PAGES; the caller must validate
     * them before these loops index the fixed-size arrays. */
    for (i = 0; i < ri->reqRingNumPages; i++) {
        m->req_ring_pages_pa[i] = ri->reqRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    for (i = 0; i < ri->cmpRingNumPages; i++) {
        m->cmp_ring_pages_pa[i] = ri->cmpRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    /* Publish initial indices and ring sizes in the shared state page */
    RS_SET_FIELD(m->rs_pa, reqProdIdx, 0);
    RS_SET_FIELD(m->rs_pa, reqConsIdx, 0);
    RS_SET_FIELD(m->rs_pa, reqNumEntriesLog2, txr_len_log2);

    RS_SET_FIELD(m->rs_pa, cmpProdIdx, 0);
    RS_SET_FIELD(m->rs_pa, cmpConsIdx, 0);
    RS_SET_FIELD(m->rs_pa, cmpNumEntriesLog2, rxr_len_log2);

    trace_pvscsi_ring_init_data(txr_len_log2, rxr_len_log2);

    /* Flush ring state page changes */
    smp_wmb();
}
 167
/*
 * Configure the message ring from a SETUP_MSG_RING command: record guest
 * page addresses, derive the index mask, and zero the message indices in
 * the guest-visible rings-state page (rs_pa must already be set up).
 */
static void
pvscsi_ring_init_msg(PVSCSIRingInfo *m, PVSCSICmdDescSetupMsgRing *ri)
{
    int i;
    uint32_t len_log2;
    uint32_t ring_size;

    ring_size = ri->numPages * PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    len_log2 = pvscsi_log2(ring_size - 1);

    m->msg_len_mask = MASK(len_log2);

    m->filled_msg_ptr = 0;

    /* NOTE(review): ri->numPages is not bounded here against
     * PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES; the caller must validate it. */
    for (i = 0; i < ri->numPages; i++) {
        m->msg_ring_pages_pa[i] = ri->ringPPNs[i] << VMW_PAGE_SHIFT;
    }

    RS_SET_FIELD(m->rs_pa, msgProdIdx, 0);
    RS_SET_FIELD(m->rs_pa, msgConsIdx, 0);
    RS_SET_FIELD(m->rs_pa, msgNumEntriesLog2, len_log2);

    trace_pvscsi_ring_init_msg(len_log2);

    /* Flush ring state page changes */
    smp_wmb();
}
 195
 196static void
 197pvscsi_ring_cleanup(PVSCSIRingInfo *mgr)
 198{
 199    mgr->rs_pa = 0;
 200    mgr->txr_len_mask = 0;
 201    mgr->rxr_len_mask = 0;
 202    mgr->msg_len_mask = 0;
 203    mgr->consumed_ptr = 0;
 204    mgr->filled_cmp_ptr = 0;
 205    mgr->filled_msg_ptr = 0;
 206    memset(mgr->req_ring_pages_pa, 0, sizeof(mgr->req_ring_pages_pa));
 207    memset(mgr->cmp_ring_pages_pa, 0, sizeof(mgr->cmp_ring_pages_pa));
 208    memset(mgr->msg_ring_pages_pa, 0, sizeof(mgr->msg_ring_pages_pa));
 209}
 210
/*
 * Return the guest PA of the next unconsumed request descriptor and
 * advance the private consumed counter, or 0 when the guest's producer
 * index shows the request ring is empty.
 */
static hwaddr
pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
{
    uint32_t ready_ptr = RS_GET_FIELD(mgr->rs_pa, reqProdIdx);

    /* NOTE(review): consumed_ptr is a free-running uint64_t compared with
     * a 32-bit producer index — presumably both stay in step modulo 2^32;
     * verify the wrap-around behaviour. */
    if (ready_ptr != mgr->consumed_ptr) {
        uint32_t next_ready_ptr =
            mgr->consumed_ptr++ & mgr->txr_len_mask;
        uint32_t next_ready_page =
            next_ready_ptr / PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
        uint32_t inpage_idx =
            next_ready_ptr % PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;

        return mgr->req_ring_pages_pa[next_ready_page] +
               inpage_idx * sizeof(PVSCSIRingReqDesc);
    } else {
        return 0;
    }
}
 230
/* Publish our consumer index to the guest-visible rings-state page. */
static void
pvscsi_ring_flush_req(PVSCSIRingInfo *mgr)
{
    RS_SET_FIELD(mgr->rs_pa, reqConsIdx, mgr->consumed_ptr);
}
 236
/*
 * Return the guest PA of the next free completion descriptor slot and
 * advance the private fill counter.  No overflow check is performed
 * (see comment below for why that is considered safe).
 */
static hwaddr
pvscsi_ring_pop_cmp_descr(PVSCSIRingInfo *mgr)
{
    /*
     * According to Linux driver code it explicitly verifies that number
     * of requests being processed by device is less then the size of
     * completion queue, so device may omit completion queue overflow
     * conditions check. We assume that this is true for other (Windows)
     * drivers as well.
     */

    uint32_t free_cmp_ptr =
        mgr->filled_cmp_ptr++ & mgr->rxr_len_mask;
    uint32_t free_cmp_page =
        free_cmp_ptr / PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    uint32_t inpage_idx =
        free_cmp_ptr % PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    return mgr->cmp_ring_pages_pa[free_cmp_page] +
           inpage_idx * sizeof(PVSCSIRingCmpDesc);
}
 257
/*
 * Return the guest PA of the next free message descriptor slot and
 * advance the private fill counter.  Callers must first check
 * pvscsi_ring_msg_has_room().
 */
static hwaddr
pvscsi_ring_pop_msg_descr(PVSCSIRingInfo *mgr)
{
    uint32_t free_msg_ptr =
        mgr->filled_msg_ptr++ & mgr->msg_len_mask;
    uint32_t free_msg_page =
        free_msg_ptr / PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    uint32_t inpage_idx =
        free_msg_ptr % PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    return mgr->msg_ring_pages_pa[free_msg_page] +
           inpage_idx * sizeof(PVSCSIRingMsgDesc);
}
 270
/*
 * Publish the completion producer index to the guest, after a write
 * barrier so the descriptors written earlier are visible first.
 */
static void
pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_cmp(mgr->filled_cmp_ptr);

    RS_SET_FIELD(mgr->rs_pa, cmpProdIdx, mgr->filled_cmp_ptr);
}
 281
/*
 * True when the message ring has at least one free slot, judged from the
 * guest-visible producer/consumer indices (msg_len_mask + 1 == ring size).
 */
static bool
pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr)
{
    uint32_t prodIdx = RS_GET_FIELD(mgr->rs_pa, msgProdIdx);
    uint32_t consIdx = RS_GET_FIELD(mgr->rs_pa, msgConsIdx);

    return (prodIdx - consIdx) < (mgr->msg_len_mask + 1);
}
 290
/*
 * Publish the message producer index to the guest, after a write barrier
 * so the descriptors written earlier are visible first.
 */
static void
pvscsi_ring_flush_msg(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_msg(mgr->filled_msg_ptr);

    RS_SET_FIELD(mgr->rs_pa, msgProdIdx, mgr->filled_msg_ptr);
}
 301
/*
 * Return the adapter's software state to power-on defaults: no command in
 * progress, interrupts clear, rings forgotten, request queues empty.
 * Does not touch in-flight SCSI requests — callers drain those first.
 */
static void
pvscsi_reset_state(PVSCSIState *s)
{
    s->curr_cmd = PVSCSI_CMD_FIRST;
    s->curr_cmd_data_cntr = 0;
    s->reg_command_status = PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    s->reg_interrupt_status = 0;
    pvscsi_ring_cleanup(&s->rings);
    s->rings_info_valid = FALSE;
    s->msg_ring_info_valid = FALSE;
    QTAILQ_INIT(&s->pending_queue);
    QTAILQ_INIT(&s->completion_queue);
}
 315
/*
 * Recompute the interrupt line from status & mask.  With MSI enabled an
 * edge is delivered only when the condition is raised; otherwise the
 * legacy INTx level tracks the condition.
 */
static void
pvscsi_update_irq_status(PVSCSIState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    bool should_raise = s->reg_interrupt_enabled & s->reg_interrupt_status;

    trace_pvscsi_update_irq_level(should_raise, s->reg_interrupt_enabled,
                                  s->reg_interrupt_status);

    if (s->msi_used && msi_enabled(d)) {
        if (should_raise) {
            trace_pvscsi_update_irq_msi();
            msi_notify(d, PVSCSI_VECTOR_COMPLETION);
        }
        return;
    }

    qemu_set_irq(d->irq[0], !!should_raise);
}
 335
/* Latch the completion-ring interrupt condition and update the IRQ line. */
static void
pvscsi_raise_completion_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_CMPL_0;

    /* Memory barrier to flush interrupt status register changes*/
    smp_wmb();

    pvscsi_update_irq_status(s);
}
 346
/* Latch the message-ring interrupt condition and update the IRQ line. */
static void
pvscsi_raise_message_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_MSG_0;

    /* Memory barrier to flush interrupt status register changes*/
    smp_wmb();

    pvscsi_update_irq_status(s);
}
 357
/* Copy one completion descriptor into the next free completion ring slot. */
static void
pvscsi_cmp_ring_put(PVSCSIState *s, struct PVSCSIRingCmpDesc *cmp_desc)
{
    hwaddr cmp_descr_pa;

    cmp_descr_pa = pvscsi_ring_pop_cmp_descr(&s->rings);
    trace_pvscsi_cmp_ring_put(cmp_descr_pa);
    cpu_physical_memory_write(cmp_descr_pa, (void *)cmp_desc,
                              sizeof(*cmp_desc));
}
 368
/* Copy one message descriptor into the next free message ring slot. */
static void
pvscsi_msg_ring_put(PVSCSIState *s, struct PVSCSIRingMsgDesc *msg_desc)
{
    hwaddr msg_descr_pa;

    msg_descr_pa = pvscsi_ring_pop_msg_descr(&s->rings);
    trace_pvscsi_msg_ring_put(msg_descr_pa);
    cpu_physical_memory_write(msg_descr_pa, (void *)msg_desc,
                              sizeof(*msg_desc));
}
 379
 380static void
 381pvscsi_process_completion_queue(void *opaque)
 382{
 383    PVSCSIState *s = opaque;
 384    PVSCSIRequest *pvscsi_req;
 385    bool has_completed = false;
 386
 387    while (!QTAILQ_EMPTY(&s->completion_queue)) {
 388        pvscsi_req = QTAILQ_FIRST(&s->completion_queue);
 389        QTAILQ_REMOVE(&s->completion_queue, pvscsi_req, next);
 390        pvscsi_cmp_ring_put(s, &pvscsi_req->cmp);
 391        g_free(pvscsi_req);
 392        has_completed++;
 393    }
 394
 395    if (has_completed) {
 396        pvscsi_ring_flush_cmp(&s->rings);
 397        pvscsi_raise_completion_interrupt(s);
 398    }
 399}
 400
/*
 * Full adapter reset: cancel everything on the SCSI bus (the 'resetting'
 * flag makes cancellations report BTSTAT_BUSRESET), deliver any resulting
 * completions, then wipe the software state.
 */
static void
pvscsi_reset_adapter(PVSCSIState *s)
{
    s->resetting++;
    qbus_reset_all_fn(&s->bus);
    s->resetting--;
    pvscsi_process_completion_queue(s);
    assert(QTAILQ_EMPTY(&s->pending_queue));
    pvscsi_reset_state(s);
}
 411
/* Kick the completion bottom half if there is anything to deliver. */
static void
pvscsi_schedule_completion_processing(PVSCSIState *s)
{
    /* Try putting more complete requests on the ring. */
    if (!QTAILQ_EMPTY(&s->completion_queue)) {
        qemu_bh_schedule(s->completion_worker);
    }
}
 420
/*
 * Move a finished request from the pending queue to the completion queue
 * (releasing its SCSI-layer reference) and schedule ring delivery.
 */
static void
pvscsi_complete_request(PVSCSIState *s, PVSCSIRequest *r)
{
    assert(!r->completed);

    trace_pvscsi_complete_request(r->cmp.context, r->cmp.dataLen,
                                  r->sense_key);
    if (r->sreq != NULL) {
        scsi_req_unref(r->sreq);
        r->sreq = NULL;
    }
    r->completed = 1;
    QTAILQ_REMOVE(&s->pending_queue, r, next);
    QTAILQ_INSERT_TAIL(&s->completion_queue, r, next);
    pvscsi_schedule_completion_processing(s);
}
 437
/* SCSIBusInfo callback: hand the SCSI layer our pre-built DMA SG list. */
static QEMUSGList *pvscsi_get_sg_list(SCSIRequest *r)
{
    PVSCSIRequest *req = r->hba_private;

    trace_pvscsi_get_sg_list(req->sgl.nsg, req->sgl.size);

    return &req->sgl;
}
 446
/*
 * Fetch the next PVSCSISGElement from guest memory and advance the SG
 * cursor to point at its data.
 */
static void
pvscsi_get_next_sg_elem(PVSCSISGState *sg)
{
    struct PVSCSISGElement elem;

    cpu_physical_memory_read(sg->elemAddr, (void *)&elem, sizeof(elem));
    if ((elem.flags & ~PVSCSI_KNOWN_FLAGS) != 0) {
        /*
            * There is PVSCSI_SGE_FLAG_CHAIN_ELEMENT flag described in
            * header file but its value is unknown. This flag requires
            * additional processing, so we put warning here to catch it
            * some day and make proper implementation
            */
        trace_pvscsi_get_next_sg_elem(elem.flags);
    }

    sg->elemAddr += sizeof(elem);
    sg->dataAddr = elem.addr;
    sg->resid = elem.length;
}
 467
/*
 * Copy sense data into the guest's sense buffer, truncated to the size
 * the guest provided, and record the sense key (its position depends on
 * whether the sense format is descriptor or fixed, per sense[0] bit 1).
 */
static void
pvscsi_write_sense(PVSCSIRequest *r, uint8_t *sense, int len)
{
    r->cmp.senseLen = MIN(r->req.senseLen, len);
    r->sense_key = sense[(sense[0] & 2) ? 1 : 2];
    cpu_physical_memory_write(r->req.senseAddr, sense, r->cmp.senseLen);
}
 475
 476static void
 477pvscsi_command_complete(SCSIRequest *req, uint32_t status, size_t resid)
 478{
 479    PVSCSIRequest *pvscsi_req = req->hba_private;
 480    PVSCSIState *s = pvscsi_req->dev;
 481
 482    if (!pvscsi_req) {
 483        trace_pvscsi_command_complete_not_found(req->tag);
 484        return;
 485    }
 486
 487    if (resid) {
 488        /* Short transfer.  */
 489        trace_pvscsi_command_complete_data_run();
 490        pvscsi_req->cmp.hostStatus = BTSTAT_DATARUN;
 491    }
 492
 493    pvscsi_req->cmp.scsiStatus = status;
 494    if (pvscsi_req->cmp.scsiStatus == CHECK_CONDITION) {
 495        uint8_t sense[SCSI_SENSE_BUF_SIZE];
 496        int sense_len =
 497            scsi_req_get_sense(pvscsi_req->sreq, sense, sizeof(sense));
 498
 499        trace_pvscsi_command_complete_sense_len(sense_len);
 500        pvscsi_write_sense(pvscsi_req, sense, sense_len);
 501    }
 502    qemu_sglist_destroy(&pvscsi_req->sgl);
 503    pvscsi_complete_request(s, pvscsi_req);
 504}
 505
/*
 * Post a device-status-changed message (hotplug/unplug) on the message
 * ring and raise the message interrupt.  Silently dropped if the guest
 * never set up a message ring or the ring is full.
 */
static void
pvscsi_send_msg(PVSCSIState *s, SCSIDevice *dev, uint32_t msg_type)
{
    if (s->msg_ring_info_valid && pvscsi_ring_msg_has_room(&s->rings)) {
        PVSCSIMsgDescDevStatusChanged msg = {0};

        msg.type = msg_type;
        msg.bus = dev->channel;
        msg.target = dev->id;
        msg.lun[1] = dev->lun;  /* LUN encoded at byte 1, as in the request format */

        pvscsi_msg_ring_put(s, (PVSCSIRingMsgDesc *)&msg);
        pvscsi_ring_flush_msg(&s->rings);
        pvscsi_raise_message_interrupt(s);
    }
}
 522
/* SCSIBusInfo hotplug callback: tell the guest a device appeared. */
static void
pvscsi_hotplug(SCSIBus *bus, SCSIDevice *dev)
{
    PVSCSIState *s = container_of(bus, PVSCSIState, bus);
    pvscsi_send_msg(s, dev, PVSCSI_MSG_DEV_ADDED);
}
 529
/* SCSIBusInfo hot-unplug callback: tell the guest a device went away. */
static void
pvscsi_hot_unplug(SCSIBus *bus, SCSIDevice *dev)
{
    PVSCSIState *s = container_of(bus, PVSCSIState, bus);
    pvscsi_send_msg(s, dev, PVSCSI_MSG_DEV_REMOVED);
}
 536
 537static void
 538pvscsi_request_cancelled(SCSIRequest *req)
 539{
 540    PVSCSIRequest *pvscsi_req = req->hba_private;
 541    PVSCSIState *s = pvscsi_req->dev;
 542
 543    if (pvscsi_req->completed) {
 544        return;
 545    }
 546
 547   if (pvscsi_req->dev->resetting) {
 548       pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET;
 549    } else {
 550       pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE;
 551    }
 552
 553    pvscsi_complete_request(s, pvscsi_req);
 554}
 555
/*
 * Translate an 8-byte guest LUN field and target id into a SCSIDevice.
 * Only single-level LUNs encoded in byte 1 are supported: any other
 * non-zero byte, or an out-of-range target, yields NULL.  On success
 * *target_lun receives the decoded LUN.
 */
static SCSIDevice*
pvscsi_device_find(PVSCSIState *s, int channel, int target,
                   uint8_t *requested_lun, uint8_t *target_lun)
{
    if (requested_lun[0] || requested_lun[2] || requested_lun[3] ||
        requested_lun[4] || requested_lun[5] || requested_lun[6] ||
        requested_lun[7] || (target > PVSCSI_MAX_DEVS)) {
        return NULL;
    } else {
        *target_lun = requested_lun[1];
        return scsi_device_find(&s->bus, channel, target, *target_lun);
    }
}
 569
/*
 * Allocate a PVSCSIRequest for a ring descriptor, link it on the pending
 * queue and resolve the addressed device.  *d is NULL when the descriptor
 * addresses a device we don't have; the request is queued regardless so
 * the caller can complete it with an error.
 */
static PVSCSIRequest *
pvscsi_queue_pending_descriptor(PVSCSIState *s, SCSIDevice **d,
                                struct PVSCSIRingReqDesc *descr)
{
    PVSCSIRequest *pvscsi_req;
    uint8_t lun;

    pvscsi_req = g_malloc0(sizeof(*pvscsi_req));
    pvscsi_req->dev = s;
    pvscsi_req->req = *descr;
    pvscsi_req->cmp.context = pvscsi_req->req.context;
    QTAILQ_INSERT_TAIL(&s->pending_queue, pvscsi_req, next);

    *d = pvscsi_device_find(s, descr->bus, descr->target, descr->lun, &lun);
    if (*d) {
        pvscsi_req->lun = lun;
    }

    return pvscsi_req;
}
 590
 591static void
 592pvscsi_convert_sglist(PVSCSIRequest *r)
 593{
 594    int chunk_size;
 595    uint64_t data_length = r->req.dataLen;
 596    PVSCSISGState sg = r->sg;
 597    while (data_length) {
 598        while (!sg.resid) {
 599            pvscsi_get_next_sg_elem(&sg);
 600            trace_pvscsi_convert_sglist(r->req.context, r->sg.dataAddr,
 601                                        r->sg.resid);
 602        }
 603        assert(data_length > 0);
 604        chunk_size = MIN((unsigned) data_length, sg.resid);
 605        if (chunk_size) {
 606            qemu_sglist_add(&r->sgl, sg.dataAddr, chunk_size);
 607        }
 608
 609        sg.dataAddr += chunk_size;
 610        data_length -= chunk_size;
 611        sg.resid -= chunk_size;
 612    }
 613}
 614
/*
 * Build the request's DMA SG list: either converted from the guest's
 * scatter-gather element list, or a single flat region when the request
 * carries no SG list.
 */
static void
pvscsi_build_sglist(PVSCSIState *s, PVSCSIRequest *r)
{
    PCIDevice *d = PCI_DEVICE(s);

    qemu_sglist_init(&r->sgl, 1, pci_dma_context(d));
    if (r->req.flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        pvscsi_convert_sglist(r);
    } else {
        qemu_sglist_add(&r->sgl, r->req.dataAddr, r->req.dataLen);
    }
}
 627
/*
 * Process one request descriptor taken from the request ring: resolve the
 * device, create the SCSI request, verify the declared transfer direction
 * against what the CDB implies, build the DMA list and submit.  Any error
 * completes the request with an appropriate hostStatus instead.
 */
static void
pvscsi_process_request_descriptor(PVSCSIState *s,
                                  struct PVSCSIRingReqDesc *descr)
{
    SCSIDevice *d;
    PVSCSIRequest *r = pvscsi_queue_pending_descriptor(s, &d, descr);
    int64_t n;

    trace_pvscsi_process_req_descr(descr->cdb[0], descr->context);

    if (!d) {
        /* No such device: report selection timeout */
        r->cmp.hostStatus = BTSTAT_SELTIMEO;
        trace_pvscsi_process_req_descr_unknown_device();
        pvscsi_complete_request(s, r);
        return;
    }

    if (descr->flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        r->sg.elemAddr = descr->dataAddr;
    }

    r->sreq = scsi_req_new(d, descr->context, r->lun, descr->cdb, r);
    /* Descriptor direction must agree with the command's transfer mode */
    if (r->sreq->cmd.mode == SCSI_XFER_FROM_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TODEVICE)) {
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }
    if (r->sreq->cmd.mode == SCSI_XFER_TO_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TOHOST)) {
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }

    pvscsi_build_sglist(s, r);
    n = scsi_req_enqueue(r->sreq);

    if (n) {
        scsi_req_continue(r->sreq);
    }
}
 672
/*
 * Drain the request ring: copy in and process every descriptor the guest
 * has produced, then publish our updated consumer index.
 */
static void
pvscsi_process_io(PVSCSIState *s)
{
    PVSCSIRingReqDesc descr;
    hwaddr next_descr_pa;

    assert(s->rings_info_valid);
    while ((next_descr_pa = pvscsi_ring_pop_req_descr(&s->rings)) != 0) {

        /* Only read after production index verification */
        smp_rmb();

        trace_pvscsi_process_io(next_descr_pa);
        cpu_physical_memory_read(next_descr_pa, &descr, sizeof(descr));
        pvscsi_process_request_descriptor(s, &descr);
    }

    pvscsi_ring_flush_req(&s->rings);
}
 692
 693static void
 694pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings *rc)
 695{
 696    int i;
 697    trace_pvscsi_tx_rings_ppn("Rings State", rc->ringsStatePPN);
 698
 699    trace_pvscsi_tx_rings_num_pages("Request Ring", rc->reqRingNumPages);
 700    for (i = 0; i < rc->reqRingNumPages; i++) {
 701        trace_pvscsi_tx_rings_ppn("Request Ring", rc->reqRingPPNs[i]);
 702    }
 703
 704    trace_pvscsi_tx_rings_num_pages("Confirm Ring", rc->cmpRingNumPages);
 705    for (i = 0; i < rc->cmpRingNumPages; i++) {
 706        trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->reqRingPPNs[i]);
 707    }
 708}
 709
/* PVSCSI_CMD_CONFIG handler — not implemented, always reports failure. */
static uint64_t
pvscsi_on_cmd_config(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_CONFIG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
 716
/* PVSCSI_CMD_DEVICE_UNPLUG handler — not implemented, always fails. */
static uint64_t
pvscsi_on_cmd_unplug(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_DEVICE_UNPLUG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
 723
/* PVSCSI_CMD_ISSUE_SCSI handler — not implemented, always fails. */
static uint64_t
pvscsi_on_issue_scsi(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_ISSUE_SCSI");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
 730
 731static uint64_t
 732pvscsi_on_cmd_setup_rings(PVSCSIState *s)
 733{
 734    PVSCSICmdDescSetupRings *rc =
 735        (PVSCSICmdDescSetupRings *) s->curr_cmd_data;
 736
 737    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_RINGS");
 738
 739    pvscsi_dbg_dump_tx_rings_config(rc);
 740    pvscsi_ring_init_data(&s->rings, rc);
 741    s->rings_info_valid = TRUE;
 742    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
 743}
 744
/*
 * PVSCSI_CMD_ABORT_CMD handler: cancel the pending request whose context
 * matches the descriptor, reporting BTSTAT_ABORTQUEUE to the guest.
 * Succeeds even when no matching request is found.
 */
static uint64_t
pvscsi_on_cmd_abort(PVSCSIState *s)
{
    PVSCSICmdDescAbortCmd *cmd = (PVSCSICmdDescAbortCmd *) s->curr_cmd_data;
    PVSCSIRequest *r, *next;

    trace_pvscsi_on_cmd_abort(cmd->context, cmd->target);

    /* r is NULL if the loop runs to the end without a break */
    QTAILQ_FOREACH_SAFE(r, &s->pending_queue, next, next) {
        if (r->req.context == cmd->context) {
            break;
        }
    }
    if (r) {
        assert(!r->completed);
        r->cmp.hostStatus = BTSTAT_ABORTQUEUE;
        scsi_req_cancel(r->sreq);
    }

    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
 766
/* Fallback handler for unrecognized commands — trace and fail. */
static uint64_t
pvscsi_on_cmd_unknown(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_unknown_data(s->curr_cmd_data[0]);
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
 773
/*
 * PVSCSI_CMD_RESET_DEVICE handler: reset the addressed device.  The
 * 'resetting' flag makes any cancelled requests report BTSTAT_BUSRESET.
 * Fails when the target/LUN does not resolve to a device.
 */
static uint64_t
pvscsi_on_cmd_reset_device(PVSCSIState *s)
{
    uint8_t target_lun = 0;
    struct PVSCSICmdDescResetDevice *cmd =
        (struct PVSCSICmdDescResetDevice *) s->curr_cmd_data;
    SCSIDevice *sdev;

    sdev = pvscsi_device_find(s, 0, cmd->target, cmd->lun, &target_lun);

    trace_pvscsi_on_cmd_reset_dev(cmd->target, (int) target_lun, sdev);

    if (sdev != NULL) {
        s->resetting++;
        device_reset(&sdev->qdev);
        s->resetting--;
        return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    }

    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
 795
/*
 * PVSCSI_CMD_RESET_BUS handler: reset every device on the bus; cancelled
 * requests report BTSTAT_BUSRESET while 'resetting' is raised.
 */
static uint64_t
pvscsi_on_cmd_reset_bus(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_RESET_BUS");

    s->resetting++;
    qbus_reset_all_fn(&s->bus);
    s->resetting--;
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
 806
 807static uint64_t
 808pvscsi_on_cmd_setup_msg_ring(PVSCSIState *s)
 809{
 810    PVSCSICmdDescSetupMsgRing *rc =
 811        (PVSCSICmdDescSetupMsgRing *) s->curr_cmd_data;
 812
 813    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_MSG_RING");
 814
 815    if (!s->use_msg) {
 816        return PVSCSI_COMMAND_PROCESSING_FAILED;
 817    }
 818
 819    if (s->rings_info_valid) {
 820        pvscsi_ring_init_msg(&s->rings, rc);
 821        s->msg_ring_info_valid = TRUE;
 822    }
 823    return sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(uint32_t);
 824}
 825
/* PVSCSI_CMD_ADAPTER_RESET handler: full reset of the adapter state. */
static uint64_t
pvscsi_on_cmd_adapter_reset(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_ADAPTER_RESET");

    pvscsi_reset_adapter(s);
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
 834
/*
 * Command dispatch table, indexed by PVSCSI command id.  'data_size' is
 * the number of argument bytes the command expects; its handler runs once
 * that many bytes have arrived via the COMMAND_DATA register (see
 * pvscsi_do_command_processing).
 */
static const struct {
    int       data_size;
    uint64_t  (*handler_fn)(PVSCSIState *s);
} pvscsi_commands[] = {
    [PVSCSI_CMD_FIRST] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unknown,
    },

    /* Not implemented, data size defined based on what arrives on windows */
    [PVSCSI_CMD_CONFIG] = {
        .data_size = 6 * sizeof(uint32_t),
        .handler_fn = pvscsi_on_cmd_config,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_ISSUE_SCSI] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_issue_scsi,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_DEVICE_UNPLUG] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unplug,
    },

    [PVSCSI_CMD_SETUP_RINGS] = {
        .data_size = sizeof(PVSCSICmdDescSetupRings),
        .handler_fn = pvscsi_on_cmd_setup_rings,
    },

    [PVSCSI_CMD_RESET_DEVICE] = {
        .data_size = sizeof(struct PVSCSICmdDescResetDevice),
        .handler_fn = pvscsi_on_cmd_reset_device,
    },

    [PVSCSI_CMD_RESET_BUS] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_reset_bus,
    },

    [PVSCSI_CMD_SETUP_MSG_RING] = {
        .data_size = sizeof(PVSCSICmdDescSetupMsgRing),
        .handler_fn = pvscsi_on_cmd_setup_msg_ring,
    },

    [PVSCSI_CMD_ADAPTER_RESET] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_adapter_reset,
    },

    [PVSCSI_CMD_ABORT_CMD] = {
        .data_size = sizeof(struct PVSCSICmdDescAbortCmd),
        .handler_fn = pvscsi_on_cmd_abort,
    },
};
 892
/*
 * Run the current command's handler once enough data words have arrived,
 * storing its result in the command status register and resetting the
 * command collector.  Zero-argument commands run immediately.
 */
static void
pvscsi_do_command_processing(PVSCSIState *s)
{
    size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);

    assert(s->curr_cmd < PVSCSI_CMD_LAST);
    if (bytes_arrived >= pvscsi_commands[s->curr_cmd].data_size) {
        s->reg_command_status = pvscsi_commands[s->curr_cmd].handler_fn(s);
        s->curr_cmd = PVSCSI_CMD_FIRST;
        s->curr_cmd_data_cntr   = 0;
    }
}
 905
/*
 * Collect one 32-bit word of command data (COMMAND_DATA register write)
 * and run the handler if the command is now complete.  The assert bounds
 * the collector against curr_cmd_data overflow.
 */
static void
pvscsi_on_command_data(PVSCSIState *s, uint32_t value)
{
    size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);

    assert(bytes_arrived < sizeof(s->curr_cmd_data));
    s->curr_cmd_data[s->curr_cmd_data_cntr++] = value;

    pvscsi_do_command_processing(s);
}
 916
/*
 * Begin a new command (COMMAND register write): record the id (unknown
 * ids map to PVSCSI_CMD_FIRST, handled by pvscsi_on_cmd_unknown), reset
 * the data collector and run immediately if no data is expected.
 */
static void
pvscsi_on_command(PVSCSIState *s, uint64_t cmd_id)
{
    if ((cmd_id > PVSCSI_CMD_FIRST) && (cmd_id < PVSCSI_CMD_LAST)) {
        s->curr_cmd = cmd_id;
    } else {
        s->curr_cmd = PVSCSI_CMD_FIRST;
        trace_pvscsi_on_cmd_unknown(cmd_id);
    }

    s->curr_cmd_data_cntr = 0;
    s->reg_command_status = PVSCSI_COMMAND_NOT_ENOUGH_DATA;

    pvscsi_do_command_processing(s);
}
 932
/*
 * MMIO write handler for the register BAR (4-byte accesses per
 * pvscsi_ops.impl): dispatches command/data writes, interrupt
 * acknowledge/mask updates and ring kicks.
 */
static void
pvscsi_io_write(void *opaque, hwaddr addr,
                uint64_t val, unsigned size)
{
    PVSCSIState *s = opaque;

    switch (addr) {
    case PVSCSI_REG_OFFSET_COMMAND:
        pvscsi_on_command(s, val);
        break;

    case PVSCSI_REG_OFFSET_COMMAND_DATA:
        pvscsi_on_command_data(s, (uint32_t) val);
        break;

    case PVSCSI_REG_OFFSET_INTR_STATUS:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_STATUS", val);
        /* Write-one-to-clear, then retry any deferred completions */
        s->reg_interrupt_status &= ~val;
        pvscsi_update_irq_status(s);
        pvscsi_schedule_completion_processing(s);
        break;

    case PVSCSI_REG_OFFSET_INTR_MASK:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_MASK", val);
        s->reg_interrupt_enabled = val;
        pvscsi_update_irq_status(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_NON_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_NON_RW_IO", val);
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_RW_IO", val);
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_DEBUG:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_DEBUG", val);
        break;

    default:
        trace_pvscsi_io_write_unknown(addr, size, val);
        break;
    }

}
 981
 982static uint64_t
 983pvscsi_io_read(void *opaque, hwaddr addr, unsigned size)
 984{
 985    PVSCSIState *s = opaque;
 986
 987    switch (addr) {
 988    case PVSCSI_REG_OFFSET_INTR_STATUS:
 989        trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_STATUS",
 990                             s->reg_interrupt_status);
 991        return s->reg_interrupt_status;
 992
 993    case PVSCSI_REG_OFFSET_INTR_MASK:
 994        trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_MASK",
 995                             s->reg_interrupt_status);
 996        return s->reg_interrupt_enabled;
 997
 998    case PVSCSI_REG_OFFSET_COMMAND_STATUS:
 999        trace_pvscsi_io_read("PVSCSI_REG_OFFSET_COMMAND_STATUS",
1000                             s->reg_interrupt_status);
1001        return s->reg_command_status;
1002
1003    default:
1004        trace_pvscsi_io_read_unknown(addr, size);
1005        return 0;
1006    }
1007}
1008
1009
1010static bool
1011pvscsi_init_msi(PVSCSIState *s)
1012{
1013    int res;
1014    PCIDevice *d = PCI_DEVICE(s);
1015
1016    res = msi_init(d, PVSCSI_MSI_OFFSET, PVSCSI_MSIX_NUM_VECTORS,
1017                   PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK);
1018    if (res < 0) {
1019        trace_pvscsi_init_msi_fail(res);
1020        s->msi_used = false;
1021    } else {
1022        s->msi_used = true;
1023    }
1024
1025    return s->msi_used;
1026}
1027
1028static void
1029pvscsi_cleanup_msi(PVSCSIState *s)
1030{
1031    PCIDevice *d = PCI_DEVICE(s);
1032
1033    if (s->msi_used) {
1034        msi_uninit(d);
1035    }
1036}
1037
/*
 * MMIO callbacks for the device register BAR.  All accesses are performed
 * as aligned 32-bit little-endian operations (.impl forces 4-byte access;
 * the memory core splits/merges other guest access sizes as needed).
 */
static const MemoryRegionOps pvscsi_ops = {
        .read = pvscsi_io_read,
        .write = pvscsi_io_write,
        .endianness = DEVICE_LITTLE_ENDIAN,
        .impl = {
                .min_access_size = 4,
                .max_access_size = 4,
        },
};
1047
/*
 * SCSI bus callbacks for the emulated PVSCSI HBA.  Tagged command queuing
 * is enabled; up to PVSCSI_MAX_DEVS targets on a single channel, one LUN
 * per target (max_channel/max_lun are upper bounds, i.e. 0 means only
 * channel 0 / LUN 0 are addressable).
 */
static const struct SCSIBusInfo pvscsi_scsi_info = {
        .tcq = true,
        .max_target = PVSCSI_MAX_DEVS,
        .max_channel = 0,
        .max_lun = 0,

        .get_sg_list = pvscsi_get_sg_list,
        .complete = pvscsi_command_complete,
        .cancel = pvscsi_request_cancelled,
        .hotplug = pvscsi_hotplug,
        .hot_unplug = pvscsi_hot_unplug,
};
1060
/*
 * PCI realize callback: set up config space, the register MMIO BAR, MSI,
 * the completion bottom-half and the SCSI bus, then reset device state.
 *
 * Returns 0 on success or -ENOMEM if the completion BH could not be
 * created (earlier resources are rolled back on that path).
 */
static int
pvscsi_init(PCIDevice *pci_dev)
{
    PVSCSIState *s = PVSCSI(pci_dev);

    trace_pvscsi_state("init");

    /* PCI subsystem ID */
    pci_dev->config[PCI_SUBSYSTEM_ID] = 0x00;
    pci_dev->config[PCI_SUBSYSTEM_ID + 1] = 0x10;

    /* PCI latency timer = 255 */
    pci_dev->config[PCI_LATENCY_TIMER] = 0xff;

    /* Interrupt pin A */
    pci_config_set_interrupt_pin(pci_dev->config, 1);

    /* Registers live in a single memory BAR 0 (see MSI-X note at file top). */
    memory_region_init_io(&s->io_space, &pvscsi_ops, s,
                          "pvscsi-io", PVSCSI_MEM_SPACE_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->io_space);

    /* MSI is optional; on failure the device falls back to INTx. */
    pvscsi_init_msi(s);

    /* Completions are delivered from a bottom half, not inline. */
    s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s);
    if (!s->completion_worker) {
        /* NOTE(review): qemu_bh_new() may not actually return NULL in
         * current QEMU (allocation aborts on OOM) -- confirm; kept as a
         * defensive rollback path. */
        pvscsi_cleanup_msi(s);
        memory_region_destroy(&s->io_space);
        return -ENOMEM;
    }

    scsi_bus_new(&s->bus, &pci_dev->qdev, &pvscsi_scsi_info, NULL);
    pvscsi_reset_state(s);

    return 0;
}
1096
1097static void
1098pvscsi_uninit(PCIDevice *pci_dev)
1099{
1100    PVSCSIState *s = PVSCSI(pci_dev);
1101
1102    trace_pvscsi_state("uninit");
1103    qemu_bh_delete(s->completion_worker);
1104
1105    pvscsi_cleanup_msi(s);
1106
1107    memory_region_destroy(&s->io_space);
1108}
1109
1110static void
1111pvscsi_reset(DeviceState *dev)
1112{
1113    PCIDevice *d = PCI_DEVICE(dev);
1114    PVSCSIState *s = PVSCSI(d);
1115
1116    trace_pvscsi_state("reset");
1117    pvscsi_reset_adapter(s);
1118}
1119
1120static void
1121pvscsi_pre_save(void *opaque)
1122{
1123    PVSCSIState *s = (PVSCSIState *) opaque;
1124
1125    trace_pvscsi_state("presave");
1126
1127    assert(QTAILQ_EMPTY(&s->pending_queue));
1128    assert(QTAILQ_EMPTY(&s->completion_queue));
1129}
1130
/*
 * Migration post-load hook: nothing to reconstruct (all state is carried
 * verbatim by the vmstate fields); just trace and report success.
 */
static int
pvscsi_post_load(void *opaque, int version_id)
{
    trace_pvscsi_state("postload");
    return 0;
}
1137
/*
 * Migration stream description.  The field order and types here define
 * the wire format -- do not reorder or retype fields without bumping
 * version_id and handling compatibility.
 */
static const VMStateDescription vmstate_pvscsi = {
    .name = TYPE_PVSCSI,
    .version_id = 0,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .pre_save = pvscsi_pre_save,
    .post_load = pvscsi_post_load,
    .fields      = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState),
        VMSTATE_UINT8(msi_used, PVSCSIState),
        VMSTATE_UINT32(resetting, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_status, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_enabled, PVSCSIState),
        VMSTATE_UINT64(reg_command_status, PVSCSIState),
        /* In-progress multi-word command state (see pvscsi_on_command_data). */
        VMSTATE_UINT64(curr_cmd, PVSCSIState),
        VMSTATE_UINT32(curr_cmd_data_cntr, PVSCSIState),
        VMSTATE_UINT32_ARRAY(curr_cmd_data, PVSCSIState,
                             ARRAY_SIZE(((PVSCSIState *)NULL)->curr_cmd_data)),
        VMSTATE_UINT8(rings_info_valid, PVSCSIState),
        VMSTATE_UINT8(msg_ring_info_valid, PVSCSIState),
        VMSTATE_UINT8(use_msg, PVSCSIState),

        /* Guest-programmed ring layout and producer/consumer positions. */
        VMSTATE_UINT64(rings.rs_pa, PVSCSIState),
        VMSTATE_UINT32(rings.txr_len_mask, PVSCSIState),
        VMSTATE_UINT32(rings.rxr_len_mask, PVSCSIState),
        VMSTATE_UINT64_ARRAY(rings.req_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64_ARRAY(rings.cmp_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64(rings.consumed_ptr, PVSCSIState),
        VMSTATE_UINT64(rings.filled_cmp_ptr, PVSCSIState),

        VMSTATE_END_OF_LIST()
    }
};
1173
/*
 * PCI config-space write hook: apply the default write first so that
 * msi_write_config() then observes the updated MSI capability bytes.
 */
static void
pvscsi_write_config(PCIDevice *pci, uint32_t addr, uint32_t val, int len)
{
    pci_default_write_config(pci, addr, val, len);
    msi_write_config(pci, addr, val, len);
}
1180
/* User-configurable device properties; use_msg enables the message
 * (event) ring and defaults to on. */
static Property pvscsi_properties[] = {
    DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1),
    DEFINE_PROP_END_OF_LIST(),
};
1185
1186static void pvscsi_class_init(ObjectClass *klass, void *data)
1187{
1188    DeviceClass *dc = DEVICE_CLASS(klass);
1189    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1190
1191    k->init = pvscsi_init;
1192    k->exit = pvscsi_uninit;
1193    k->vendor_id = PCI_VENDOR_ID_VMWARE;
1194    k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI;
1195    k->class_id = PCI_CLASS_STORAGE_SCSI;
1196    k->subsystem_id = 0x1000;
1197    dc->reset = pvscsi_reset;
1198    dc->vmsd = &vmstate_pvscsi;
1199    dc->props = pvscsi_properties;
1200    k->config_write = pvscsi_write_config;
1201}
1202
1203static const TypeInfo pvscsi_info = {
1204    .name          = "pvscsi",
1205    .parent        = TYPE_PCI_DEVICE,
1206    .instance_size = sizeof(PVSCSIState),
1207    .class_init    = pvscsi_class_init,
1208};
1209
/* Register the pvscsi QOM type with the type system at module-init time. */
static void
pvscsi_register_types(void)
{
    type_register_static(&pvscsi_info);
}

type_init(pvscsi_register_types);
1217