qemu/hw/scsi/vmw_pvscsi.c
/*
 * QEMU VMWARE PVSCSI paravirtual SCSI bus
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Based on implementation by Paolo Bonzini
 * http://lists.gnu.org/archive/html/qemu-devel/2011-08/msg00729.html
 *
 * Authors:
 * Paolo Bonzini <pbonzini@redhat.com>
 * Dmitry Fleytman <dmitry@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 *
 * NOTE about MSI-X:
 * MSI-X support has been removed for the moment because it leads the
 * Windows guest to crash on startup. The crash happens because the Windows
 * driver requires the MSI-X shared memory to be part of the same BAR that
 * is used for the rings state registers, etc. This is not supported by the
 * QEMU infrastructure, so a separate BAR is created for MSI-X purposes and
 * the Windows driver fails to deal with 2 BARs.
 *
 */

#include "hw/scsi/scsi.h"
#include <block/scsi.h>
#include "hw/pci/msi.h"
#include "vmw_pvscsi.h"
#include "trace.h"


#define PVSCSI_MSI_OFFSET        (0x50)
#define PVSCSI_USE_64BIT         (true)
#define PVSCSI_PER_VECTOR_MASK   (false)

#define PVSCSI_MAX_DEVS                   (64)
#define PVSCSI_MSIX_NUM_VECTORS           (1)

#define PVSCSI_MAX_CMD_DATA_WORDS \
    (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))

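/*
 * Helpers for 32-bit little-endian access to individual fields of the
 * guest's PVSCSIRingsState page located at physical address rs_pa.
 */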
#define RS_GET_FIELD(rs_pa, field) \
    (ldl_le_phys(&address_space_memory, \
                 rs_pa + offsetof(struct PVSCSIRingsState, field)))
#define RS_SET_FIELD(rs_pa, field, val) \
    (stl_le_phys(&address_space_memory, \
                 rs_pa + offsetof(struct PVSCSIRingsState, field), val))

#define TYPE_PVSCSI "pvscsi"
#define PVSCSI(obj) OBJECT_CHECK(PVSCSIState, (obj), TYPE_PVSCSI)

typedef struct PVSCSIRingInfo {
    uint64_t            rs_pa;
    uint32_t            txr_len_mask;
    uint32_t            rxr_len_mask;
    uint32_t            msg_len_mask;
    uint64_t            req_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t            cmp_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t            msg_ring_pages_pa[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
    uint64_t            consumed_ptr;
    uint64_t            filled_cmp_ptr;
    uint64_t            filled_msg_ptr;
} PVSCSIRingInfo;

typedef struct PVSCSISGState {
    hwaddr elemAddr;
    hwaddr dataAddr;
    uint32_t resid;
} PVSCSISGState;

typedef QTAILQ_HEAD(, PVSCSIRequest) PVSCSIRequestList;

typedef struct {
    PCIDevice parent_obj;
    MemoryRegion io_space;
    SCSIBus bus;
    QEMUBH *completion_worker;
    PVSCSIRequestList pending_queue;
    PVSCSIRequestList completion_queue;

    uint64_t reg_interrupt_status;        /* Interrupt status register value */
    uint64_t reg_interrupt_enabled;       /* Interrupt mask register value   */
    uint64_t reg_command_status;          /* Command status register value   */

    /* Command data adoption mechanism */
    uint64_t curr_cmd;                   /* Last command arrived             */
    uint32_t curr_cmd_data_cntr;         /* Amount of data for last command  */

    /* Collector for current command data */
    uint32_t curr_cmd_data[PVSCSI_MAX_CMD_DATA_WORDS];

    uint8_t rings_info_valid;            /* Whether data rings initialized   */
    uint8_t msg_ring_info_valid;         /* Whether message ring initialized */
    uint8_t use_msg;                     /* Whether to use message ring      */

    uint8_t msi_used;    /* Whether MSI support was installed successfully   */

    PVSCSIRingInfo rings;                /* Data transfer rings manager      */
    uint32_t resetting;                  /* Reset in progress                */
} PVSCSIState;

typedef struct PVSCSIRequest {
    SCSIRequest *sreq;
    PVSCSIState *dev;
    uint8_t sense_key;
    uint8_t completed;
    int lun;
    QEMUSGList sgl;
    PVSCSISGState sg;
    struct PVSCSIRingReqDesc req;
    struct PVSCSIRingCmpDesc cmp;
    QTAILQ_ENTRY(PVSCSIRequest) next;
} PVSCSIRequest;

/* Integer binary logarithm */
static int
pvscsi_log2(uint32_t input)
{
    int log = 0;
    assert(input > 0);
    while (input >> ++log) {
    }
    return log;
}

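/*
 * Set up the request/completion rings described by a PVSCSI_CMD_SETUP_RINGS
 * command and reset the producer/consumer indices in the rings state page.
 */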
static void
pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
{
    int i;
    uint32_t txr_len_log2, rxr_len_log2;
    uint32_t req_ring_size, cmp_ring_size;
    m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT;

    req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
    cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    txr_len_log2 = pvscsi_log2(req_ring_size - 1);
    rxr_len_log2 = pvscsi_log2(cmp_ring_size - 1);

    m->txr_len_mask = MASK(txr_len_log2);
    m->rxr_len_mask = MASK(rxr_len_log2);

    m->consumed_ptr = 0;
    m->filled_cmp_ptr = 0;

    for (i = 0; i < ri->reqRingNumPages; i++) {
        m->req_ring_pages_pa[i] = ri->reqRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    for (i = 0; i < ri->cmpRingNumPages; i++) {
        m->cmp_ring_pages_pa[i] = ri->cmpRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    RS_SET_FIELD(m->rs_pa, reqProdIdx, 0);
    RS_SET_FIELD(m->rs_pa, reqConsIdx, 0);
    RS_SET_FIELD(m->rs_pa, reqNumEntriesLog2, txr_len_log2);

    RS_SET_FIELD(m->rs_pa, cmpProdIdx, 0);
    RS_SET_FIELD(m->rs_pa, cmpConsIdx, 0);
    RS_SET_FIELD(m->rs_pa, cmpNumEntriesLog2, rxr_len_log2);

    trace_pvscsi_ring_init_data(txr_len_log2, rxr_len_log2);

    /* Flush ring state page changes */
    smp_wmb();
}

static void
pvscsi_ring_init_msg(PVSCSIRingInfo *m, PVSCSICmdDescSetupMsgRing *ri)
{
    int i;
    uint32_t len_log2;
    uint32_t ring_size;

    ring_size = ri->numPages * PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    len_log2 = pvscsi_log2(ring_size - 1);

    m->msg_len_mask = MASK(len_log2);

    m->filled_msg_ptr = 0;

    for (i = 0; i < ri->numPages; i++) {
        m->msg_ring_pages_pa[i] = ri->ringPPNs[i] << VMW_PAGE_SHIFT;
    }

    RS_SET_FIELD(m->rs_pa, msgProdIdx, 0);
    RS_SET_FIELD(m->rs_pa, msgConsIdx, 0);
    RS_SET_FIELD(m->rs_pa, msgNumEntriesLog2, len_log2);

    trace_pvscsi_ring_init_msg(len_log2);

    /* Flush ring state page changes */
    smp_wmb();
}

static void
pvscsi_ring_cleanup(PVSCSIRingInfo *mgr)
{
    mgr->rs_pa = 0;
    mgr->txr_len_mask = 0;
    mgr->rxr_len_mask = 0;
    mgr->msg_len_mask = 0;
    mgr->consumed_ptr = 0;
    mgr->filled_cmp_ptr = 0;
    mgr->filled_msg_ptr = 0;
    memset(mgr->req_ring_pages_pa, 0, sizeof(mgr->req_ring_pages_pa));
    memset(mgr->cmp_ring_pages_pa, 0, sizeof(mgr->cmp_ring_pages_pa));
    memset(mgr->msg_ring_pages_pa, 0, sizeof(mgr->msg_ring_pages_pa));
}

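/*
 * Return the guest physical address of the next request descriptor to
 * consume, or 0 if the request ring is currently empty.
 */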
static hwaddr
pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
{
    uint32_t ready_ptr = RS_GET_FIELD(mgr->rs_pa, reqProdIdx);

    if (ready_ptr != mgr->consumed_ptr) {
        uint32_t next_ready_ptr =
            mgr->consumed_ptr++ & mgr->txr_len_mask;
        uint32_t next_ready_page =
            next_ready_ptr / PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
        uint32_t inpage_idx =
            next_ready_ptr % PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;

        return mgr->req_ring_pages_pa[next_ready_page] +
               inpage_idx * sizeof(PVSCSIRingReqDesc);
    } else {
        return 0;
    }
}

static void
pvscsi_ring_flush_req(PVSCSIRingInfo *mgr)
{
    RS_SET_FIELD(mgr->rs_pa, reqConsIdx, mgr->consumed_ptr);
}

static hwaddr
pvscsi_ring_pop_cmp_descr(PVSCSIRingInfo *mgr)
{
    /*
     * The Linux driver explicitly verifies that the number of requests
     * being processed by the device is less than the size of the
     * completion queue, so the device may omit the completion-queue
     * overflow check. We assume that this holds for other (Windows)
     * drivers as well.
     */

    uint32_t free_cmp_ptr =
        mgr->filled_cmp_ptr++ & mgr->rxr_len_mask;
    uint32_t free_cmp_page =
        free_cmp_ptr / PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    uint32_t inpage_idx =
        free_cmp_ptr % PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    return mgr->cmp_ring_pages_pa[free_cmp_page] +
           inpage_idx * sizeof(PVSCSIRingCmpDesc);
}

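/* Return the guest physical address of the next free message descriptor */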
static hwaddr
pvscsi_ring_pop_msg_descr(PVSCSIRingInfo *mgr)
{
    uint32_t free_msg_ptr =
        mgr->filled_msg_ptr++ & mgr->msg_len_mask;
    uint32_t free_msg_page =
        free_msg_ptr / PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    uint32_t inpage_idx =
        free_msg_ptr % PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    return mgr->msg_ring_pages_pa[free_msg_page] +
           inpage_idx * sizeof(PVSCSIRingMsgDesc);
}

static void
pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_cmp(mgr->filled_cmp_ptr);

    RS_SET_FIELD(mgr->rs_pa, cmpProdIdx, mgr->filled_cmp_ptr);
}

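/* Check whether the message ring has room for at least one more descriptor */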
static bool
pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr)
{
    uint32_t prodIdx = RS_GET_FIELD(mgr->rs_pa, msgProdIdx);
    uint32_t consIdx = RS_GET_FIELD(mgr->rs_pa, msgConsIdx);

    return (prodIdx - consIdx) < (mgr->msg_len_mask + 1);
}

static void
pvscsi_ring_flush_msg(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_msg(mgr->filled_msg_ptr);

    RS_SET_FIELD(mgr->rs_pa, msgProdIdx, mgr->filled_msg_ptr);
}

static void
pvscsi_reset_state(PVSCSIState *s)
{
    s->curr_cmd = PVSCSI_CMD_FIRST;
    s->curr_cmd_data_cntr = 0;
    s->reg_command_status = PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    s->reg_interrupt_status = 0;
    pvscsi_ring_cleanup(&s->rings);
    s->rings_info_valid = FALSE;
    s->msg_ring_info_valid = FALSE;
    QTAILQ_INIT(&s->pending_queue);
    QTAILQ_INIT(&s->completion_queue);
}

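/*
 * Recompute the interrupt state from the status and mask registers and
 * forward it as either an MSI message or the INTx level.
 */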
static void
pvscsi_update_irq_status(PVSCSIState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    bool should_raise = s->reg_interrupt_enabled & s->reg_interrupt_status;

    trace_pvscsi_update_irq_level(should_raise, s->reg_interrupt_enabled,
                                  s->reg_interrupt_status);

    if (s->msi_used && msi_enabled(d)) {
        if (should_raise) {
            trace_pvscsi_update_irq_msi();
            msi_notify(d, PVSCSI_VECTOR_COMPLETION);
        }
        return;
    }

    pci_set_irq(d, !!should_raise);
}

static void
pvscsi_raise_completion_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_CMPL_0;

    /* Memory barrier to flush interrupt status register changes */
    smp_wmb();

    pvscsi_update_irq_status(s);
}

static void
pvscsi_raise_message_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_MSG_0;

    /* Memory barrier to flush interrupt status register changes */
    smp_wmb();

    pvscsi_update_irq_status(s);
}

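/* Copy a completion descriptor into the next free slot of the completion ring */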
static void
pvscsi_cmp_ring_put(PVSCSIState *s, struct PVSCSIRingCmpDesc *cmp_desc)
{
    hwaddr cmp_descr_pa;

    cmp_descr_pa = pvscsi_ring_pop_cmp_descr(&s->rings);
    trace_pvscsi_cmp_ring_put(cmp_descr_pa);
    cpu_physical_memory_write(cmp_descr_pa, (void *)cmp_desc,
                              sizeof(*cmp_desc));
}

static void
pvscsi_msg_ring_put(PVSCSIState *s, struct PVSCSIRingMsgDesc *msg_desc)
{
    hwaddr msg_descr_pa;

    msg_descr_pa = pvscsi_ring_pop_msg_descr(&s->rings);
    trace_pvscsi_msg_ring_put(msg_descr_pa);
    cpu_physical_memory_write(msg_descr_pa, (void *)msg_desc,
                              sizeof(*msg_desc));
}

static void
pvscsi_process_completion_queue(void *opaque)
{
    PVSCSIState *s = opaque;
    PVSCSIRequest *pvscsi_req;
    bool has_completed = false;

    while (!QTAILQ_EMPTY(&s->completion_queue)) {
        pvscsi_req = QTAILQ_FIRST(&s->completion_queue);
        QTAILQ_REMOVE(&s->completion_queue, pvscsi_req, next);
        pvscsi_cmp_ring_put(s, &pvscsi_req->cmp);
        g_free(pvscsi_req);
        has_completed = true;
    }

    if (has_completed) {
        pvscsi_ring_flush_cmp(&s->rings);
        pvscsi_raise_completion_interrupt(s);
    }
}

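/* Reset the SCSI bus, drain pending completions and reinitialize device state */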
static void
pvscsi_reset_adapter(PVSCSIState *s)
{
    s->resetting++;
    qbus_reset_all_fn(&s->bus);
    s->resetting--;
    pvscsi_process_completion_queue(s);
    assert(QTAILQ_EMPTY(&s->pending_queue));
    pvscsi_reset_state(s);
}

static void
pvscsi_schedule_completion_processing(PVSCSIState *s)
{
    /* Try putting more completed requests on the ring. */
    if (!QTAILQ_EMPTY(&s->completion_queue)) {
        qemu_bh_schedule(s->completion_worker);
    }
}

static void
pvscsi_complete_request(PVSCSIState *s, PVSCSIRequest *r)
{
    assert(!r->completed);

    trace_pvscsi_complete_request(r->cmp.context, r->cmp.dataLen,
                                  r->sense_key);
    if (r->sreq != NULL) {
        scsi_req_unref(r->sreq);
        r->sreq = NULL;
    }
    r->completed = 1;
    QTAILQ_REMOVE(&s->pending_queue, r, next);
    QTAILQ_INSERT_TAIL(&s->completion_queue, r, next);
    pvscsi_schedule_completion_processing(s);
}

static QEMUSGList *pvscsi_get_sg_list(SCSIRequest *r)
{
    PVSCSIRequest *req = r->hba_private;

    trace_pvscsi_get_sg_list(req->sgl.nsg, req->sgl.size);

    return &req->sgl;
}

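/* Fetch the next guest scatter-gather element and advance the SG cursor */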
static void
pvscsi_get_next_sg_elem(PVSCSISGState *sg)
{
    struct PVSCSISGElement elem;

    cpu_physical_memory_read(sg->elemAddr, (void *)&elem, sizeof(elem));
    if ((elem.flags & ~PVSCSI_KNOWN_FLAGS) != 0) {
        /*
         * There is a PVSCSI_SGE_FLAG_CHAIN_ELEMENT flag described in the
         * header file, but its value is unknown. This flag requires
         * additional processing, so we trace unknown flags here to catch
         * them some day and implement proper handling.
         */
        trace_pvscsi_get_next_sg_elem(elem.flags);
    }

    sg->elemAddr += sizeof(elem);
    sg->dataAddr = elem.addr;
    sg->resid = elem.length;
}

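/*
 * Copy sense data to the guest buffer (truncated to the size requested by
 * the descriptor) and remember the sense key for tracing.
 */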
static void
pvscsi_write_sense(PVSCSIRequest *r, uint8_t *sense, int len)
{
    r->cmp.senseLen = MIN(r->req.senseLen, len);
    r->sense_key = sense[(sense[0] & 2) ? 1 : 2];
    cpu_physical_memory_write(r->req.senseAddr, sense, r->cmp.senseLen);
}

static void
pvscsi_command_complete(SCSIRequest *req, uint32_t status, size_t resid)
{
    PVSCSIRequest *pvscsi_req = req->hba_private;
    PVSCSIState *s;

    if (!pvscsi_req) {
        trace_pvscsi_command_complete_not_found(req->tag);
        return;
    }
    s = pvscsi_req->dev;

    if (resid) {
        /* Short transfer.  */
        trace_pvscsi_command_complete_data_run();
        pvscsi_req->cmp.hostStatus = BTSTAT_DATARUN;
    }

    pvscsi_req->cmp.scsiStatus = status;
    if (pvscsi_req->cmp.scsiStatus == CHECK_CONDITION) {
        uint8_t sense[SCSI_SENSE_BUF_SIZE];
        int sense_len =
            scsi_req_get_sense(pvscsi_req->sreq, sense, sizeof(sense));

        trace_pvscsi_command_complete_sense_len(sense_len);
        pvscsi_write_sense(pvscsi_req, sense, sense_len);
    }
    qemu_sglist_destroy(&pvscsi_req->sgl);
    pvscsi_complete_request(s, pvscsi_req);
}

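/* Post a device status change message on the message ring, if one is set up */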
static void
pvscsi_send_msg(PVSCSIState *s, SCSIDevice *dev, uint32_t msg_type)
{
    if (s->msg_ring_info_valid && pvscsi_ring_msg_has_room(&s->rings)) {
        PVSCSIMsgDescDevStatusChanged msg = {0};

        msg.type = msg_type;
        msg.bus = dev->channel;
        msg.target = dev->id;
        msg.lun[1] = dev->lun;

        pvscsi_msg_ring_put(s, (PVSCSIRingMsgDesc *)&msg);
        pvscsi_ring_flush_msg(&s->rings);
        pvscsi_raise_message_interrupt(s);
    }
}

static void
pvscsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(hotplug_dev);

    pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_ADDED);
}

static void
pvscsi_hot_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(hotplug_dev);

    pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_REMOVED);
    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
}

static void
pvscsi_request_cancelled(SCSIRequest *req)
{
    PVSCSIRequest *pvscsi_req = req->hba_private;
    PVSCSIState *s = pvscsi_req->dev;

    if (pvscsi_req->completed) {
        return;
    }

    if (pvscsi_req->dev->resetting) {
        pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET;
    } else {
        pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE;
    }

    pvscsi_complete_request(s, pvscsi_req);
}

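/*
 * Resolve the 8-byte LUN field of a request to a SCSI device.  Only LUNs
 * encoded entirely in byte 1 are supported; anything else, or a target
 * number beyond PVSCSI_MAX_DEVS, yields NULL.
 */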
static SCSIDevice*
pvscsi_device_find(PVSCSIState *s, int channel, int target,
                   uint8_t *requested_lun, uint8_t *target_lun)
{
    if (requested_lun[0] || requested_lun[2] || requested_lun[3] ||
        requested_lun[4] || requested_lun[5] || requested_lun[6] ||
        requested_lun[7] || (target > PVSCSI_MAX_DEVS)) {
        return NULL;
    } else {
        *target_lun = requested_lun[1];
        return scsi_device_find(&s->bus, channel, target, *target_lun);
    }
}

static PVSCSIRequest *
pvscsi_queue_pending_descriptor(PVSCSIState *s, SCSIDevice **d,
                                struct PVSCSIRingReqDesc *descr)
{
    PVSCSIRequest *pvscsi_req;
    uint8_t lun;

    pvscsi_req = g_malloc0(sizeof(*pvscsi_req));
    pvscsi_req->dev = s;
    pvscsi_req->req = *descr;
    pvscsi_req->cmp.context = pvscsi_req->req.context;
    QTAILQ_INSERT_TAIL(&s->pending_queue, pvscsi_req, next);

    *d = pvscsi_device_find(s, descr->bus, descr->target, descr->lun, &lun);
    if (*d) {
        pvscsi_req->lun = lun;
    }

    return pvscsi_req;
}

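/* Walk the guest scatter-gather chain and add its chunks to the QEMUSGList */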
static void
pvscsi_convert_sglist(PVSCSIRequest *r)
{
    int chunk_size;
    uint64_t data_length = r->req.dataLen;
    PVSCSISGState sg = r->sg;
    while (data_length) {
        while (!sg.resid) {
            pvscsi_get_next_sg_elem(&sg);
            trace_pvscsi_convert_sglist(r->req.context, r->sg.dataAddr,
                                        r->sg.resid);
        }
        assert(data_length > 0);
        chunk_size = MIN((unsigned) data_length, sg.resid);
        if (chunk_size) {
            qemu_sglist_add(&r->sgl, sg.dataAddr, chunk_size);
        }

        sg.dataAddr += chunk_size;
        data_length -= chunk_size;
        sg.resid -= chunk_size;
    }
}

static void
pvscsi_build_sglist(PVSCSIState *s, PVSCSIRequest *r)
{
    PCIDevice *d = PCI_DEVICE(s);

    pci_dma_sglist_init(&r->sgl, d, 1);
    if (r->req.flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        pvscsi_convert_sglist(r);
    } else {
        qemu_sglist_add(&r->sgl, r->req.dataAddr, r->req.dataLen);
    }
}

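/*
 * Validate a request descriptor, create the corresponding SCSI request,
 * build its data scatter-gather list and hand it to the SCSI layer.
 */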
static void
pvscsi_process_request_descriptor(PVSCSIState *s,
                                  struct PVSCSIRingReqDesc *descr)
{
    SCSIDevice *d;
    PVSCSIRequest *r = pvscsi_queue_pending_descriptor(s, &d, descr);
    int64_t n;

    trace_pvscsi_process_req_descr(descr->cdb[0], descr->context);

    if (!d) {
        r->cmp.hostStatus = BTSTAT_SELTIMEO;
        trace_pvscsi_process_req_descr_unknown_device();
        pvscsi_complete_request(s, r);
        return;
    }

    if (descr->flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        r->sg.elemAddr = descr->dataAddr;
    }

    r->sreq = scsi_req_new(d, descr->context, r->lun, descr->cdb, r);
    if (r->sreq->cmd.mode == SCSI_XFER_FROM_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TODEVICE)) {
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }
    if (r->sreq->cmd.mode == SCSI_XFER_TO_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TOHOST)) {
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }

    pvscsi_build_sglist(s, r);
    n = scsi_req_enqueue(r->sreq);

    if (n) {
        scsi_req_continue(r->sreq);
    }
}

static void
pvscsi_process_io(PVSCSIState *s)
{
    PVSCSIRingReqDesc descr;
    hwaddr next_descr_pa;

    assert(s->rings_info_valid);
    while ((next_descr_pa = pvscsi_ring_pop_req_descr(&s->rings)) != 0) {

        /* Only read after production index verification */
        smp_rmb();

        trace_pvscsi_process_io(next_descr_pa);
        cpu_physical_memory_read(next_descr_pa, &descr, sizeof(descr));
        pvscsi_process_request_descriptor(s, &descr);
    }

    pvscsi_ring_flush_req(&s->rings);
}

static void
pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings *rc)
{
    int i;
    trace_pvscsi_tx_rings_ppn("Rings State", rc->ringsStatePPN);

    trace_pvscsi_tx_rings_num_pages("Request Ring", rc->reqRingNumPages);
    for (i = 0; i < rc->reqRingNumPages; i++) {
        trace_pvscsi_tx_rings_ppn("Request Ring", rc->reqRingPPNs[i]);
    }

    trace_pvscsi_tx_rings_num_pages("Confirm Ring", rc->cmpRingNumPages);
    for (i = 0; i < rc->cmpRingNumPages; i++) {
        trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->cmpRingPPNs[i]);
    }
}

static uint64_t
pvscsi_on_cmd_config(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_CONFIG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_cmd_unplug(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_DEVICE_UNPLUG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_issue_scsi(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_ISSUE_SCSI");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_cmd_setup_rings(PVSCSIState *s)
{
    PVSCSICmdDescSetupRings *rc =
        (PVSCSICmdDescSetupRings *) s->curr_cmd_data;

    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_RINGS");

    pvscsi_dbg_dump_tx_rings_config(rc);
    pvscsi_ring_init_data(&s->rings, rc);
    s->rings_info_valid = TRUE;
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}

static uint64_t
pvscsi_on_cmd_abort(PVSCSIState *s)
{
    PVSCSICmdDescAbortCmd *cmd = (PVSCSICmdDescAbortCmd *) s->curr_cmd_data;
    PVSCSIRequest *r, *next;

    trace_pvscsi_on_cmd_abort(cmd->context, cmd->target);

    QTAILQ_FOREACH_SAFE(r, &s->pending_queue, next, next) {
        if (r->req.context == cmd->context) {
            break;
        }
    }
    if (r) {
        assert(!r->completed);
        r->cmp.hostStatus = BTSTAT_ABORTQUEUE;
        scsi_req_cancel(r->sreq);
    }

    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}

static uint64_t
pvscsi_on_cmd_unknown(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_unknown_data(s->curr_cmd_data[0]);
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_cmd_reset_device(PVSCSIState *s)
{
    uint8_t target_lun = 0;
    struct PVSCSICmdDescResetDevice *cmd =
        (struct PVSCSICmdDescResetDevice *) s->curr_cmd_data;
    SCSIDevice *sdev;

    sdev = pvscsi_device_find(s, 0, cmd->target, cmd->lun, &target_lun);

    trace_pvscsi_on_cmd_reset_dev(cmd->target, (int) target_lun, sdev);

    if (sdev != NULL) {
        s->resetting++;
        device_reset(&sdev->qdev);
        s->resetting--;
        return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    }

    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_cmd_reset_bus(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_RESET_BUS");

    s->resetting++;
    qbus_reset_all_fn(&s->bus);
    s->resetting--;
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}

static uint64_t
pvscsi_on_cmd_setup_msg_ring(PVSCSIState *s)
{
    PVSCSICmdDescSetupMsgRing *rc =
        (PVSCSICmdDescSetupMsgRing *) s->curr_cmd_data;

    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_MSG_RING");

    if (!s->use_msg) {
        return PVSCSI_COMMAND_PROCESSING_FAILED;
    }

    if (s->rings_info_valid) {
        pvscsi_ring_init_msg(&s->rings, rc);
        s->msg_ring_info_valid = TRUE;
    }
    return sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(uint32_t);
}

static uint64_t
pvscsi_on_cmd_adapter_reset(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_ADAPTER_RESET");

    pvscsi_reset_adapter(s);
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}

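/* Dispatch table: expected payload size and handler for each PVSCSI command */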
static const struct {
    int       data_size;
    uint64_t  (*handler_fn)(PVSCSIState *s);
} pvscsi_commands[] = {
    [PVSCSI_CMD_FIRST] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unknown,
    },

    /* Not implemented, data size defined based on what arrives on Windows */
    [PVSCSI_CMD_CONFIG] = {
        .data_size = 6 * sizeof(uint32_t),
        .handler_fn = pvscsi_on_cmd_config,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_ISSUE_SCSI] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_issue_scsi,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_DEVICE_UNPLUG] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unplug,
    },

    [PVSCSI_CMD_SETUP_RINGS] = {
        .data_size = sizeof(PVSCSICmdDescSetupRings),
        .handler_fn = pvscsi_on_cmd_setup_rings,
    },

    [PVSCSI_CMD_RESET_DEVICE] = {
        .data_size = sizeof(struct PVSCSICmdDescResetDevice),
        .handler_fn = pvscsi_on_cmd_reset_device,
    },

    [PVSCSI_CMD_RESET_BUS] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_reset_bus,
    },

    [PVSCSI_CMD_SETUP_MSG_RING] = {
        .data_size = sizeof(PVSCSICmdDescSetupMsgRing),
        .handler_fn = pvscsi_on_cmd_setup_msg_ring,
    },

    [PVSCSI_CMD_ADAPTER_RESET] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_adapter_reset,
    },

    [PVSCSI_CMD_ABORT_CMD] = {
        .data_size = sizeof(struct PVSCSICmdDescAbortCmd),
        .handler_fn = pvscsi_on_cmd_abort,
    },
};

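/* Invoke the current command's handler once all of its data words have arrived */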
static void
pvscsi_do_command_processing(PVSCSIState *s)
{
    size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);

    assert(s->curr_cmd < PVSCSI_CMD_LAST);
    if (bytes_arrived >= pvscsi_commands[s->curr_cmd].data_size) {
        s->reg_command_status = pvscsi_commands[s->curr_cmd].handler_fn(s);
        s->curr_cmd = PVSCSI_CMD_FIRST;
        s->curr_cmd_data_cntr   = 0;
    }
}

static void
pvscsi_on_command_data(PVSCSIState *s, uint32_t value)
{
    size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);

    assert(bytes_arrived < sizeof(s->curr_cmd_data));
    s->curr_cmd_data[s->curr_cmd_data_cntr++] = value;

    pvscsi_do_command_processing(s);
}

static void
pvscsi_on_command(PVSCSIState *s, uint64_t cmd_id)
{
    if ((cmd_id > PVSCSI_CMD_FIRST) && (cmd_id < PVSCSI_CMD_LAST)) {
        s->curr_cmd = cmd_id;
    } else {
        s->curr_cmd = PVSCSI_CMD_FIRST;
        trace_pvscsi_on_cmd_unknown(cmd_id);
    }

    s->curr_cmd_data_cntr = 0;
    s->reg_command_status = PVSCSI_COMMAND_NOT_ENOUGH_DATA;

    pvscsi_do_command_processing(s);
}

static void
pvscsi_io_write(void *opaque, hwaddr addr,
                uint64_t val, unsigned size)
{
    PVSCSIState *s = opaque;

    switch (addr) {
    case PVSCSI_REG_OFFSET_COMMAND:
        pvscsi_on_command(s, val);
        break;

    case PVSCSI_REG_OFFSET_COMMAND_DATA:
        pvscsi_on_command_data(s, (uint32_t) val);
        break;

    case PVSCSI_REG_OFFSET_INTR_STATUS:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_STATUS", val);
        s->reg_interrupt_status &= ~val;
        pvscsi_update_irq_status(s);
        pvscsi_schedule_completion_processing(s);
        break;

    case PVSCSI_REG_OFFSET_INTR_MASK:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_MASK", val);
        s->reg_interrupt_enabled = val;
        pvscsi_update_irq_status(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_NON_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_NON_RW_IO", val);
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_RW_IO", val);
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_DEBUG:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_DEBUG", val);
        break;

    default:
        trace_pvscsi_io_write_unknown(addr, size, val);
        break;
    }
}

static uint64_t
pvscsi_io_read(void *opaque, hwaddr addr, unsigned size)
{
    PVSCSIState *s = opaque;

    switch (addr) {
    case PVSCSI_REG_OFFSET_INTR_STATUS:
        trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_STATUS",
                             s->reg_interrupt_status);
        return s->reg_interrupt_status;

    case PVSCSI_REG_OFFSET_INTR_MASK:
        trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_MASK",
                             s->reg_interrupt_enabled);
        return s->reg_interrupt_enabled;

    case PVSCSI_REG_OFFSET_COMMAND_STATUS:
        trace_pvscsi_io_read("PVSCSI_REG_OFFSET_COMMAND_STATUS",
                             s->reg_command_status);
        return s->reg_command_status;

    default:
        trace_pvscsi_io_read_unknown(addr, size);
        return 0;
    }
}


static bool
pvscsi_init_msi(PVSCSIState *s)
{
    int res;
    PCIDevice *d = PCI_DEVICE(s);

    res = msi_init(d, PVSCSI_MSI_OFFSET, PVSCSI_MSIX_NUM_VECTORS,
                   PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK);
    if (res < 0) {
        trace_pvscsi_init_msi_fail(res);
        s->msi_used = false;
    } else {
        s->msi_used = true;
    }

    return s->msi_used;
}

static void
pvscsi_cleanup_msi(PVSCSIState *s)
{
    PCIDevice *d = PCI_DEVICE(s);

    if (s->msi_used) {
        msi_uninit(d);
    }
}

static const MemoryRegionOps pvscsi_ops = {
        .read = pvscsi_io_read,
        .write = pvscsi_io_write,
        .endianness = DEVICE_LITTLE_ENDIAN,
        .impl = {
                .min_access_size = 4,
                .max_access_size = 4,
        },
};

1053
1054static const struct SCSIBusInfo pvscsi_scsi_info = {
1055        .tcq = true,
1056        .max_target = PVSCSI_MAX_DEVS,
1057        .max_channel = 0,
1058        .max_lun = 0,
1059
1060        .get_sg_list = pvscsi_get_sg_list,
1061        .complete = pvscsi_command_complete,
1062        .cancel = pvscsi_request_cancelled,
1063};
1064
1065static int
1066pvscsi_init(PCIDevice *pci_dev)
1067{
1068    PVSCSIState *s = PVSCSI(pci_dev);
1069
1070    trace_pvscsi_state("init");
1071
1072    /* PCI subsystem ID */
1073    pci_dev->config[PCI_SUBSYSTEM_ID] = 0x00;
1074    pci_dev->config[PCI_SUBSYSTEM_ID + 1] = 0x10;
1075
1076    /* PCI latency timer = 255 */
1077    pci_dev->config[PCI_LATENCY_TIMER] = 0xff;
1078
1079    /* Interrupt pin A */
1080    pci_config_set_interrupt_pin(pci_dev->config, 1);
1081
1082    memory_region_init_io(&s->io_space, OBJECT(s), &pvscsi_ops, s,
1083                          "pvscsi-io", PVSCSI_MEM_SPACE_SIZE);
1084    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->io_space);
1085
1086    pvscsi_init_msi(s);
1087
1088    s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s);
1089    if (!s->completion_worker) {
1090        pvscsi_cleanup_msi(s);
1091        return -ENOMEM;
1092    }
1093
1094    scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(pci_dev),
1095                 &pvscsi_scsi_info, NULL);
1096    /* override default SCSI bus hotplug-handler, with pvscsi's one */
1097    qbus_set_hotplug_handler(BUS(&s->bus), DEVICE(s), &error_abort);
1098    pvscsi_reset_state(s);
1099
1100    return 0;
1101}
1102
static void
pvscsi_uninit(PCIDevice *pci_dev)
{
    PVSCSIState *s = PVSCSI(pci_dev);

    trace_pvscsi_state("uninit");
    qemu_bh_delete(s->completion_worker);

    pvscsi_cleanup_msi(s);
}

static void
pvscsi_reset(DeviceState *dev)
{
    PCIDevice *d = PCI_DEVICE(dev);
    PVSCSIState *s = PVSCSI(d);

    trace_pvscsi_state("reset");
    pvscsi_reset_adapter(s);
}

static void
pvscsi_pre_save(void *opaque)
{
    PVSCSIState *s = (PVSCSIState *) opaque;

    trace_pvscsi_state("presave");

    assert(QTAILQ_EMPTY(&s->pending_queue));
    assert(QTAILQ_EMPTY(&s->completion_queue));
}

static int
pvscsi_post_load(void *opaque, int version_id)
{
    trace_pvscsi_state("postload");
    return 0;
}

static const VMStateDescription vmstate_pvscsi = {
    .name = "pvscsi",
    .version_id = 0,
    .minimum_version_id = 0,
    .pre_save = pvscsi_pre_save,
    .post_load = pvscsi_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState),
        VMSTATE_UINT8(msi_used, PVSCSIState),
        VMSTATE_UINT32(resetting, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_status, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_enabled, PVSCSIState),
        VMSTATE_UINT64(reg_command_status, PVSCSIState),
        VMSTATE_UINT64(curr_cmd, PVSCSIState),
        VMSTATE_UINT32(curr_cmd_data_cntr, PVSCSIState),
        VMSTATE_UINT32_ARRAY(curr_cmd_data, PVSCSIState,
                             ARRAY_SIZE(((PVSCSIState *)NULL)->curr_cmd_data)),
        VMSTATE_UINT8(rings_info_valid, PVSCSIState),
        VMSTATE_UINT8(msg_ring_info_valid, PVSCSIState),
        VMSTATE_UINT8(use_msg, PVSCSIState),

        VMSTATE_UINT64(rings.rs_pa, PVSCSIState),
        VMSTATE_UINT32(rings.txr_len_mask, PVSCSIState),
        VMSTATE_UINT32(rings.rxr_len_mask, PVSCSIState),
        VMSTATE_UINT64_ARRAY(rings.req_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64_ARRAY(rings.cmp_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64(rings.consumed_ptr, PVSCSIState),
        VMSTATE_UINT64(rings.filled_cmp_ptr, PVSCSIState),

        VMSTATE_END_OF_LIST()
    }
};

static void
pvscsi_write_config(PCIDevice *pci, uint32_t addr, uint32_t val, int len)
{
    pci_default_write_config(pci, addr, val, len);
    msi_write_config(pci, addr, val, len);
}

static Property pvscsi_properties[] = {
    DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void pvscsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->init = pvscsi_init;
    k->exit = pvscsi_uninit;
    k->vendor_id = PCI_VENDOR_ID_VMWARE;
    k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    k->subsystem_id = 0x1000;
    dc->reset = pvscsi_reset;
    dc->vmsd = &vmstate_pvscsi;
    dc->props = pvscsi_properties;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    k->config_write = pvscsi_write_config;
    hc->unplug = pvscsi_hot_unplug;
    hc->plug = pvscsi_hotplug;
}

static const TypeInfo pvscsi_info = {
    .name          = TYPE_PVSCSI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PVSCSIState),
    .class_init    = pvscsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void
pvscsi_register_types(void)
{
    type_register_static(&pvscsi_info);
}

type_init(pvscsi_register_types);