qemu/hw/scsi/virtio-scsi.c
/*
 * Virtio SCSI HBA
 *
 * Copyright IBM, Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *   Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *   Paolo Bonzini      <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-scsi.h"
#include "migration/qemu-file-types.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "trace.h"

typedef struct VirtIOSCSIReq {
    /*
     * Note:
     * - fields up to resp_iov are initialized by virtio_scsi_init_req;
     * - fields from next onwards (after resp_iov) are zeroed by
     *   virtio_scsi_init_req.
     */
    VirtQueueElement elem;

    VirtIOSCSI *dev;
    VirtQueue *vq;
    QEMUSGList qsgl;
    QEMUIOVector resp_iov;

    /* Used for two-stage request submission and TMFs deferred to BH */
    QTAILQ_ENTRY(VirtIOSCSIReq) next;

    /* Used for cancellation of request during TMFs */
    int remaining;

    SCSIRequest *sreq;
    size_t resp_size;
    enum SCSIXferMode mode;
    union {
        VirtIOSCSICmdResp     cmd;
        VirtIOSCSICtrlTMFResp tmf;
        VirtIOSCSICtrlANResp  an;
        VirtIOSCSIEvent       event;
    } resp;
    union {
        VirtIOSCSICmdReq      cmd;
        VirtIOSCSICtrlTMFReq  tmf;
        VirtIOSCSICtrlANReq   an;
    } req;
} VirtIOSCSIReq;

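/*
 * Extract the LUN number from bytes 2-3 of the virtio-scsi 8-byte LUN field.
 * The top two bits carry the SAM address method (0x40 is set for LUNs >= 256,
 * matching the REPORT LUNS flat-space encoding) and are masked off here.
 */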
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}

static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun)
{
    if (lun[0] != 1) {
        return NULL;
    }
    if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
        return NULL;
    }
    return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}

static void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}

static void virtio_scsi_free_req(VirtIOSCSIReq *req)
{
    qemu_iovec_destroy(&req->resp_iov);
    qemu_sglist_destroy(&req->qsgl);
    g_free(req);
}

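/*
 * Copy the response header into the guest buffers, push the element back onto
 * the virtqueue and notify the guest, then drop the reference to the SCSI
 * request (if any) and free the virtio-scsi request.
 */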
static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_notify_irqfd(vdev, vq);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}

static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
    virtqueue_detach_element(req->vq, &req->elem, 0);
    virtio_scsi_free_req(req);
}

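/*
 * Append the guest buffers described by iov/addr to the request's DMA
 * scatter-gather list, skipping the first 'skip' bytes (the virtio-scsi
 * request or response header).  Returns the number of bytes added.
 */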
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
                              hwaddr *addr, int num, size_t skip)
{
    QEMUSGList *qsgl = &req->qsgl;
    size_t copied = 0;

    while (num) {
        if (skip >= iov->iov_len) {
            skip -= iov->iov_len;
        } else {
            qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
            copied += iov->iov_len - skip;
            skip = 0;
        }
        iov++;
        addr++;
        num--;
    }

    assert(skip == 0);
    return copied;
}

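/*
 * Copy the request header out of the out_sg buffers, map the response header
 * in the in_sg buffers into resp_iov, and collect the remaining buffers into
 * the data payload scatter-gather list.  A request may carry either a data-out
 * or a data-in payload, but not both.
 */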
static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}

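/*
 * Pop one element from the virtqueue and initialize it as a VirtIOSCSIReq.
 * The element is sized to hold the VirtIOSCSIReq plus room for the CDB length
 * currently configured by the guest.
 */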
static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
    VirtIOSCSIReq *req;

    req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
    if (!req) {
        return NULL;
    }
    virtio_scsi_init_req(s, vq, req);
    return req;
}

static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    uint32_t n = virtio_get_queue_index(req->vq) - VIRTIO_SCSI_VQ_NUM_FIXED;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(vdev, f, &req->elem);
}

static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(vdev, f,
                                     sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}

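/*
 * Bookkeeping used to complete a TMF once all the commands it cancels have
 * finished; tmf_req->remaining counts the outstanding cancellations.
 */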
typedef struct {
    Notifier        notifier;
    VirtIOSCSIReq  *tmf_req;
} VirtIOSCSICancelNotifier;

static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    if (--n->tmf_req->remaining == 0) {
        VirtIOSCSIReq *req = n->tmf_req;

        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                   req->req.tmf.tag, req->resp.tmf.response);
        virtio_scsi_complete_req(req);
    }
    g_free(n);
}

static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
{
    if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
    }
}

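/*
 * Perform the device or target reset for a single deferred TMF and complete
 * the request.  Runs in the main loop thread via virtio_scsi_do_tmf_bh().
 */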
static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    BusChild *kid;
    int target;

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
            goto out;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
            goto out;
        }
        qatomic_inc(&s->resetting);
        device_cold_reset(&d->qdev);
        qatomic_dec(&s->resetting);
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        target = req->req.tmf.lun[1];
        qatomic_inc(&s->resetting);

        rcu_read_lock();
        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
            if (d1->channel == 0 && d1->id == target) {
                device_cold_reset(&d1->qdev);
            }
        }
        rcu_read_unlock();

        qatomic_dec(&s->resetting);
        break;

    default:
        g_assert_not_reached();
        break;
    }

out:
    object_unref(OBJECT(d));

    virtio_scsi_acquire(s);
    virtio_scsi_complete_req(req);
    virtio_scsi_release(s);
}

/* Some TMFs must be processed from the main loop thread */
static void virtio_scsi_do_tmf_bh(void *opaque)
{
    VirtIOSCSI *s = opaque;
    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
    VirtIOSCSIReq *req;
    VirtIOSCSIReq *tmp;

    GLOBAL_STATE_CODE();

    virtio_scsi_acquire(s);

    QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
        QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
        QTAILQ_INSERT_TAIL(&reqs, req, next);
    }

    qemu_bh_delete(s->tmf_bh);
    s->tmf_bh = NULL;

    virtio_scsi_release(s);

    QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
        QTAILQ_REMOVE(&reqs, req, next);
        virtio_scsi_do_one_tmf_bh(req);
    }
}

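/*
 * Cancel the pending TMF bottom half, if any, and fail all TMF requests that
 * were queued for it.  Called from device reset and unrealize.
 */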
static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
{
    VirtIOSCSIReq *req;
    VirtIOSCSIReq *tmp;

    GLOBAL_STATE_CODE();

    virtio_scsi_acquire(s);

    if (s->tmf_bh) {
        qemu_bh_delete(s->tmf_bh);
        s->tmf_bh = NULL;
    }

    QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
        QTAILQ_REMOVE(&s->tmf_bh_list, req, next);

        /* SAM-6 6.3.2 Hard reset */
        req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        virtio_scsi_complete_req(req);
    }

    virtio_scsi_release(s);
}

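/* Queue a TMF for the main-loop BH, scheduling the BH if it is not pending */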
static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;

    QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);

    if (!s->tmf_bh) {
        s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
        qemu_bh_schedule(s->tmf_bh);
    }
}

/* Return 0 if the request is ready to be completed and returned to the guest;
 * -EINPROGRESS if the request has been submitted and will be completed later,
 * for example after an asynchronous cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    int ret = 0;

    virtio_scsi_ctx_check(s, d);
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE".  */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /*
     * req->req.tmf has the QEMU_PACKED attribute. Don't use virtio_tswap32s()
     * to avoid compiler errors.
     */
    req->req.tmf.subtype =
        virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype);

    trace_virtio_scsi_tmf_req(virtio_scsi_get_lun(req->req.tmf.lun),
                              req->req.tmf.tag, req->req.tmf.subtype);

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet, we
             * check for it in the loop above.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                VirtIOSCSICancelNotifier *notifier;

                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        virtio_scsi_defer_tmf_to_bh(req);
        ret = -EINPROGRESS;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    object_unref(OBJECT(d));
    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    object_unref(OBJECT(d));
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    object_unref(OBJECT(d));
    return ret;
}

static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req(req);
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                    sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                    sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            req->req.an.event_requested =
                virtio_tswap32(VIRTIO_DEVICE(s), req->req.an.event_requested);
            trace_virtio_scsi_an_req(virtio_scsi_get_lun(req->req.an.lun),
                                     req->req.an.event_requested);
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    if (r == 0) {
        if (type == VIRTIO_SCSI_T_TMF)
            trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                       req->req.tmf.tag,
                                       req->resp.tmf.response);
        else if (type == VIRTIO_SCSI_T_AN_QUERY ||
                 type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
            trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
                                      req->resp.an.response);
        virtio_scsi_complete_req(req);
    } else {
        assert(r == -EINPROGRESS);
    }
}

static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req;

    while ((req = virtio_scsi_pop_req(s, vq))) {
        virtio_scsi_handle_ctrl_req(s, req);
    }
}

/*
 * If dataplane is configured but not yet started, do so now and return true on
 * success.
 *
 * Dataplane is started by the core virtio code but virtqueue handler functions
 * can also be invoked when a guest kicks before DRIVER_OK, so this helper
 * function helps us deal with manually starting ioeventfd in that case.
 */
static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
{
    if (!s->ctx || s->dataplane_started) {
        return false;
    }

    virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
    return !s->dataplane_fenced;
}

static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_acquire(s);
    virtio_scsi_handle_ctrl_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    trace_virtio_scsi_cmd_resp(virtio_scsi_get_lun(req->req.cmd.lun),
                               req->req.cmd.tag,
                               req->resp.cmd.response,
                               req->resp.cmd.status);
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}

static void virtio_scsi_command_failed(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.status = GOOD;
    switch (r->host_status) {
    case SCSI_HOST_NO_LUN:
        req->resp.cmd.response = VIRTIO_SCSI_S_INCORRECT_LUN;
        break;
    case SCSI_HOST_BUSY:
        req->resp.cmd.response = VIRTIO_SCSI_S_BUSY;
        break;
    case SCSI_HOST_TIME_OUT:
    case SCSI_HOST_ABORTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
        break;
    case SCSI_HOST_BAD_RESPONSE:
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        break;
    case SCSI_HOST_RESET:
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
        break;
    case SCSI_HOST_TRANSPORT_DISRUPTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_TRANSPORT_FAILURE;
        break;
    case SCSI_HOST_TARGET_FAILURE:
        req->resp.cmd.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        break;
    case SCSI_HOST_RESERVATION_ERROR:
        req->resp.cmd.response = VIRTIO_SCSI_S_NEXUS_FAILURE;
        break;
    case SCSI_HOST_ALLOCATION_FAILURE:
    case SCSI_HOST_MEDIUM_ERROR:
    case SCSI_HOST_ERROR:
    default:
        req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
        break;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_command_complete(SCSIRequest *r, size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = r->status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}

static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                 uint8_t *buf, size_t buf_len,
                                 void *hba_private)
{
    VirtIOSCSIReq *req = hba_private;

    if (cmd->len == 0) {
        cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
        memcpy(cmd->buf, buf, cmd->len);
    }

    /* Extract the direction and mode directly from the request, for
     * host device passthrough.
     */
    cmd->xfer = req->qsgl.size;
    cmd->mode = req->mode;
    return 0;
}

static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    return &req->qsgl;
}

static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (!req) {
        return;
    }
    if (qatomic_read(&req->dev->resetting)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
    } else {
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}

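/*
 * First stage of command submission: parse the virtio headers, look up the
 * target device and create the SCSIRequest.  Returns 0 if the request is
 * ready to be submitted with virtio_scsi_handle_cmd_req_submit(); on error a
 * negative errno is returned and the request has already been completed (or
 * the device marked broken for -EINVAL).
 */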
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = &s->parent_obj;
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            virtio_scsi_fail_cmd_req(req);
            return -ENOTSUP;
        } else {
            virtio_scsi_bad_req(req);
            return -EINVAL;
        }
    }
    trace_virtio_scsi_cmd_req(virtio_scsi_get_lun(req->req.cmd.lun),
                              req->req.cmd.tag, req->req.cmd.cdb[0]);

    d = virtio_scsi_device_get(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return -ENOENT;
    }
    virtio_scsi_ctx_check(s, d);
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, vs->cdb_size, req);

    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        object_unref(OBJECT(d));
        return -ENOBUFS;
    }
    scsi_req_ref(req->sreq);
    blk_io_plug(d->conf.blk);
    object_unref(OBJECT(d));
    return 0;
}

static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;
    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    blk_io_unplug(sreq->dev->conf.blk);
    scsi_req_unref(sreq);
}

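/*
 * Drain the command virtqueue: prepare requests in a first pass (with guest
 * notifications suppressed while popping) and submit them together in a
 * second pass so the block layer can batch the I/O.
 */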
static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    int ret = 0;
    bool suppress_notifications = virtio_queue_get_notification(vq);

    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_scsi_pop_req(s, vq))) {
            ret = virtio_scsi_handle_cmd_req_prepare(s, req);
            if (!ret) {
                QTAILQ_INSERT_TAIL(&reqs, req, next);
            } else if (ret == -EINVAL) {
                /* The device is broken and shouldn't process any request */
                while (!QTAILQ_EMPTY(&reqs)) {
                    req = QTAILQ_FIRST(&reqs);
                    QTAILQ_REMOVE(&reqs, req, next);
                    blk_io_unplug(req->sreq->dev->conf.blk);
                    scsi_req_unref(req->sreq);
                    virtqueue_detach_element(req->vq, &req->elem, 0);
                    virtio_scsi_free_req(req);
                }
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (ret != -EINVAL && !virtio_queue_empty(vq));

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
}

static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    /* use non-QOM casts in the data path */
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_acquire(s);
    virtio_scsi_handle_cmd_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    virtio_stl_p(vdev, &scsiconf->seg_max,
                 s->conf.seg_max_adjust ? s->conf.virtqueue_size - 2 : 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}

static void virtio_scsi_set_config(VirtIODevice *vdev,
                                   const uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
        (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
        virtio_error(vdev,
                     "bad data written to virtio-scsi configuration space");
        return;
    }

    vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
    vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
}

static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
                                         uint64_t requested_features,
                                         Error **errp)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    /* Firstly sync all virtio-scsi possible supported features */
    requested_features |= s->host_features;
    return requested_features;
}

static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    assert(!s->dataplane_started);

    virtio_scsi_reset_tmf_bh(s);

    qatomic_inc(&s->resetting);
    bus_cold_reset(BUS(&s->bus));
    qatomic_dec(&s->resetting);

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
    s->events_dropped = false;
}

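/*
 * Report an event to the guest on the event virtqueue.  If no buffer is
 * available, set events_dropped so that a VIRTIO_SCSI_T_EVENTS_MISSED
 * notification can be delivered once the guest queues a new buffer.
 */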
static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                                   uint32_t event, uint32_t reason)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    req = virtio_scsi_pop_req(s, vs->event_vq);
    if (!req) {
        s->events_dropped = true;
        return;
    }

    if (s->events_dropped) {
        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
        s->events_dropped = false;
    }

    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req(req);
        return;
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (!dev) {
        assert(event == VIRTIO_SCSI_T_EVENTS_MISSED);
    } else {
        evt->lun[0] = 1;
        evt->lun[1] = dev->id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS.  */
        if (dev->lun >= 256) {
            evt->lun[2] = (dev->lun >> 8) | 0x40;
        }
        evt->lun[3] = dev->lun & 0xFF;
    }
    trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);

    virtio_scsi_complete_req(req);
}

static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    if (s->events_dropped) {
        virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
    }
}

static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_acquire(s);
    virtio_scsi_handle_event_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
        dev->type != TYPE_ROM) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
                               sense.asc | (sense.ascq << 8));
        virtio_scsi_release(s);
    }
}

static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    SCSIDevice *sd = SCSI_DEVICE(dev);
    sd->hba_supports_iothread = true;
}

static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *old_context;
    int ret;

    if (s->ctx && !s->dataplane_fenced) {
        if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            return;
        }
        old_context = blk_get_aio_context(sd->conf.blk);
        aio_context_acquire(old_context);
        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
        aio_context_release(old_context);
        if (ret < 0) {
            return;
        }
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_RESCAN);
        scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
        virtio_scsi_release(s);
    }
}

static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *ctx = s->ctx ?: qemu_get_aio_context();

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_REMOVED);
        scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
        virtio_scsi_release(s);
    }

    aio_disable_external(ctx);
    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
    aio_enable_external(ctx);

    if (s->ctx) {
        virtio_scsi_acquire(s);
        /* If other users keep the BlockBackend in the iothread, that's ok */
        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
        virtio_scsi_release(s);
    }
}

static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .fail = virtio_scsi_command_failed,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
};

void virtio_scsi_common_realize(DeviceState *dev,
                                VirtIOHandleOutput ctrl,
                                VirtIOHandleOutput evt,
                                VirtIOHandleOutput cmd,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, VIRTIO_ID_SCSI, sizeof(VirtIOSCSIConfig));

    if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) {
        s->conf.num_queues = 1;
    }
    if (s->conf.num_queues == 0 ||
            s->conf.num_queues > VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                         "must be a positive integer less than %d.",
                   s->conf.num_queues,
                   VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED);
        virtio_cleanup(vdev);
        return;
    }
    if (s->conf.virtqueue_size <= 2) {
        error_setg(errp, "invalid virtqueue_size property (= %" PRIu32 "), "
                   "must be > 2", s->conf.virtqueue_size);
        return;
    }
    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
    s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl);
    s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd);
    }
}

static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    Error *err = NULL;

    QTAILQ_INIT(&s->tmf_bh_list);

    virtio_scsi_common_realize(dev,
                               virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd,
                               &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_init_named(&s->bus, sizeof(s->bus), dev,
                       &virtio_scsi_scsi_info, vdev->bus_name);
    /* override default SCSI bus hotplug-handler, with virtio-scsi's one */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));

    virtio_scsi_dataplane_setup(s, errp);
}

void virtio_scsi_common_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_delete_queue(vs->ctrl_vq);
    virtio_delete_queue(vs->event_vq);
    for (i = 0; i < vs->conf.num_queues; i++) {
        virtio_delete_queue(vs->cmd_vqs[i]);
    }
    g_free(vs->cmd_vqs);
    virtio_cleanup(vdev);
}

static void virtio_scsi_device_unrealize(DeviceState *dev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(dev);

    virtio_scsi_reset_tmf_bh(s);

    qbus_set_hotplug_handler(BUS(&s->bus), NULL);
    virtio_scsi_common_unrealize(dev);
}

static Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
                       VIRTIO_SCSI_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
                                         parent_obj.conf.virtqueue_size, 256),
    DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI,
                      parent_obj.conf.seg_max_adjust, true),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
                                                  0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                                                  128),
    DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
                                           VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
                                                VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
                     TYPE_IOTHREAD, IOThread *),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_virtio_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    vdc->get_config = virtio_scsi_get_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    device_class_set_props(dc, virtio_scsi_properties);
    dc->vmsd = &vmstate_virtio_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    vdc->start_ioeventfd = virtio_scsi_dataplane_start;
    vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
    hc->pre_plug = virtio_scsi_pre_hotplug;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}

static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};

static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .class_init = virtio_scsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)