qemu/hw/scsi/virtio-scsi.c
/*
 * Virtio SCSI HBA
 *
 * Copyright IBM, Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *   Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *   Paolo Bonzini      <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-scsi.h"
#include "migration/qemu-file-types.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "trace.h"

typedef struct VirtIOSCSIReq {
    /*
     * Note:
     * - fields up to resp_iov are initialized by virtio_scsi_init_req;
     * - fields after resp_iov are zeroed by virtio_scsi_init_req.
     */
    VirtQueueElement elem;

    VirtIOSCSI *dev;
    VirtQueue *vq;
    QEMUSGList qsgl;
    QEMUIOVector resp_iov;

    /* Used for two-stage request submission and TMFs deferred to BH */
    QTAILQ_ENTRY(VirtIOSCSIReq) next;

    /* Used for cancellation of requests during TMFs */
    int remaining;

    SCSIRequest *sreq;
    size_t resp_size;
    enum SCSIXferMode mode;
    union {
        VirtIOSCSICmdResp     cmd;
        VirtIOSCSICtrlTMFResp tmf;
        VirtIOSCSICtrlANResp  an;
        VirtIOSCSIEvent       event;
    } resp;
    union {
        VirtIOSCSICmdReq      cmd;
        VirtIOSCSICtrlTMFReq  tmf;
        VirtIOSCSICtrlANReq   an;
    } req;
} VirtIOSCSIReq;

static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}

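/*
 * For reference (a sketch based on the checks below and on the encoding used
 * in virtio_scsi_push_event()): the guest addresses a device with an 8-byte
 * virtio-scsi LUN field where
 *
 *     lun[0]    = 1                     (fixed prefix)
 *     lun[1]    = target id
 *     lun[2..3] = REPORT LUNS style LUN: 0x00nn for LUN < 256,
 *                 otherwise 0x40nn with the high bits in lun[2]
 *
 * so, e.g., LUN 5 on target 2 arrives as { 1, 2, 0x00, 0x05 }.
 */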
static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun)
{
    if (lun[0] != 1) {
        return NULL;
    }
    if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
        return NULL;
    }
    return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}

static void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}
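/*
 * virtio_scsi_init_req() only sets up dev, vq, qsgl and resp_iov; req->elem
 * itself is filled in by virtqueue_pop()/qemu_get_virtqueue_element() before
 * it is called, and every byte after resp_iov (next, remaining, sreq,
 * resp_size, mode and the req/resp unions) is cleared by the memset().
 */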

static void virtio_scsi_free_req(VirtIOSCSIReq *req)
{
    qemu_iovec_destroy(&req->resp_iov);
    qemu_sglist_destroy(&req->qsgl);
    g_free(req);
}

static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_notify_irqfd(vdev, vq);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}

static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
    virtqueue_detach_element(req->vq, &req->elem, 0);
    virtio_scsi_free_req(req);
}

static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
                              hwaddr *addr, int num, size_t skip)
{
    QEMUSGList *qsgl = &req->qsgl;
    size_t copied = 0;

    while (num) {
        if (skip >= iov->iov_len) {
            skip -= iov->iov_len;
        } else {
            qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
            copied += iov->iov_len - skip;
            skip = 0;
        }
        iov++;
        addr++;
        num--;
    }

    assert(skip == 0);
    return copied;
}

static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}
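/*
 * Roughly, the guest buffer layout handled by virtio_scsi_parse_req() above
 * for a command is:
 *
 *   out_sg: [ request header (req_size bytes) ][ data-out payload ... ]
 *   in_sg:  [ response header (resp_size bytes) ][ data-in payload ... ]
 *
 * The headers are copied into req->req / req->resp_iov, while
 * qemu_sgl_concat() skips the header bytes and adds only the payload to
 * req->qsgl.  A request that carries both data-out and data-in payloads is
 * rejected with -ENOTSUP.
 */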

static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
    VirtIOSCSIReq *req;

    req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
    if (!req) {
        return NULL;
    }
    virtio_scsi_init_req(s, vq, req);
    return req;
}
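/*
 * virtqueue_pop() is asked for sizeof(VirtIOSCSIReq) + cdb_size bytes so
 * that a guest-configured CDB larger than the default (cdb_size can be
 * changed through the config space, see virtio_scsi_set_config()) still has
 * room at the tail of req->req when virtio_scsi_parse_req() copies the
 * header in.
 */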

static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    uint32_t n = virtio_get_queue_index(req->vq) - VIRTIO_SCSI_VQ_NUM_FIXED;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(vdev, f, &req->elem);
}

static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(vdev, f,
                                     sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}

typedef struct {
    Notifier        notifier;
    VirtIOSCSIReq  *tmf_req;
} VirtIOSCSICancelNotifier;

static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    if (--n->tmf_req->remaining == 0) {
        VirtIOSCSIReq *req = n->tmf_req;

        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                   req->req.tmf.tag, req->resp.tmf.response);
        virtio_scsi_complete_req(req);
    }
    g_free(n);
}
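/*
 * req->remaining acts as a reference count for TMFs that cancel other
 * requests: virtio_scsi_do_tmf() primes it to 1 before registering cancel
 * notifiers and adds 1 per outstanding request being cancelled, so the TMF
 * response is only pushed once the final notifier (or the final decrement in
 * virtio_scsi_do_tmf() itself) brings it back to 0.
 */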

static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
{
    if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
    }
}

static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    BusChild *kid;
    int target;

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
            goto out;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
            goto out;
        }
        qatomic_inc(&s->resetting);
        device_cold_reset(&d->qdev);
        qatomic_dec(&s->resetting);
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        target = req->req.tmf.lun[1];
        qatomic_inc(&s->resetting);

        rcu_read_lock();
        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
            if (d1->channel == 0 && d1->id == target) {
                device_cold_reset(&d1->qdev);
            }
        }
        rcu_read_unlock();

        qatomic_dec(&s->resetting);
        break;

    default:
        g_assert_not_reached();
        break;
    }

out:
    object_unref(OBJECT(d));

    virtio_scsi_acquire(s);
    virtio_scsi_complete_req(req);
    virtio_scsi_release(s);
}

/* Some TMFs must be processed from the main loop thread */
static void virtio_scsi_do_tmf_bh(void *opaque)
{
    VirtIOSCSI *s = opaque;
    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
    VirtIOSCSIReq *req;
    VirtIOSCSIReq *tmp;

    GLOBAL_STATE_CODE();

    virtio_scsi_acquire(s);

    QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
        QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
        QTAILQ_INSERT_TAIL(&reqs, req, next);
    }

    qemu_bh_delete(s->tmf_bh);
    s->tmf_bh = NULL;

    virtio_scsi_release(s);

    QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
        QTAILQ_REMOVE(&reqs, req, next);
        virtio_scsi_do_one_tmf_bh(req);
    }
}

static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
{
    VirtIOSCSIReq *req;
    VirtIOSCSIReq *tmp;

    GLOBAL_STATE_CODE();

    virtio_scsi_acquire(s);

    if (s->tmf_bh) {
        qemu_bh_delete(s->tmf_bh);
        s->tmf_bh = NULL;
    }

    QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
        QTAILQ_REMOVE(&s->tmf_bh_list, req, next);

        /* SAM-6 6.3.2 Hard reset */
        req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        virtio_scsi_complete_req(req);
    }

    virtio_scsi_release(s);
}

static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;

    QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);

    if (!s->tmf_bh) {
        s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
        qemu_bh_schedule(s->tmf_bh);
    }
}

/* Return 0 if the request is ready to be completed and returned to the guest;
 * return -EINPROGRESS if the request has been submitted and will be completed
 * later, as in the case of async cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    int ret = 0;

    virtio_scsi_ctx_check(s, d);
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE".  */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /*
     * req->req.tmf has the QEMU_PACKED attribute; virtio_tswap32s() cannot be
     * used here because taking the address of a packed member triggers
     * compiler errors.
     */
    req->req.tmf.subtype =
        virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype);

    trace_virtio_scsi_tmf_req(virtio_scsi_get_lun(req->req.tmf.lun),
                              req->req.tmf.tag, req->req.tmf.subtype);

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet; we
             * check for that in the loop above.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                VirtIOSCSICancelNotifier *notifier;

                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        virtio_scsi_defer_tmf_to_bh(req);
        ret = -EINPROGRESS;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    object_unref(OBJECT(d));
    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    object_unref(OBJECT(d));
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    object_unref(OBJECT(d));
    return ret;
}
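/*
 * In short, the dispatch above handles the TMF subtypes as follows:
 *   - ABORT_TASK / QUERY_TASK: look up the request with the matching tag and
 *     either cancel it asynchronously or report FUNCTION SUCCEEDED;
 *   - LOGICAL_UNIT_RESET / I_T_NEXUS_RESET: deferred to a main-loop BH
 *     (virtio_scsi_do_tmf_bh);
 *   - ABORT_TASK_SET / CLEAR_TASK_SET / QUERY_TASK_SET: walk the device's
 *     request list, cancelling or reporting as appropriate;
 *   - CLEAR_ACA and unknown subtypes: FUNCTION REJECTED.
 */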

static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req(req);
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                    sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                    sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            req->req.an.event_requested =
                virtio_tswap32(VIRTIO_DEVICE(s), req->req.an.event_requested);
            trace_virtio_scsi_an_req(virtio_scsi_get_lun(req->req.an.lun),
                                     req->req.an.event_requested);
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    if (r == 0) {
        if (type == VIRTIO_SCSI_T_TMF) {
            trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                       req->req.tmf.tag,
                                       req->resp.tmf.response);
        } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
                   type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
            trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
                                      req->resp.an.response);
        }
        virtio_scsi_complete_req(req);
    } else {
        assert(r == -EINPROGRESS);
    }
}

static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req;

    while ((req = virtio_scsi_pop_req(s, vq))) {
        virtio_scsi_handle_ctrl_req(s, req);
    }
}

/*
 * If dataplane is configured but not yet started, do so now and return true on
 * success.
 *
 * Dataplane is normally started by the core virtio code, but virtqueue handler
 * functions can also be invoked when a guest kicks before DRIVER_OK, so this
 * helper deals with manually starting ioeventfd in that case.
 */
static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
{
    if (!s->ctx || s->dataplane_started) {
        return false;
    }

    virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
    return !s->dataplane_fenced;
}

static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_acquire(s);
    virtio_scsi_handle_ctrl_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    trace_virtio_scsi_cmd_resp(virtio_scsi_get_lun(req->req.cmd.lun),
                               req->req.cmd.tag,
                               req->resp.cmd.response,
                               req->resp.cmd.status);
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}

static void virtio_scsi_command_failed(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.status = GOOD;
    switch (r->host_status) {
    case SCSI_HOST_NO_LUN:
        req->resp.cmd.response = VIRTIO_SCSI_S_INCORRECT_LUN;
        break;
    case SCSI_HOST_BUSY:
        req->resp.cmd.response = VIRTIO_SCSI_S_BUSY;
        break;
    case SCSI_HOST_TIME_OUT:
    case SCSI_HOST_ABORTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
        break;
    case SCSI_HOST_BAD_RESPONSE:
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        break;
    case SCSI_HOST_RESET:
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
        break;
    case SCSI_HOST_TRANSPORT_DISRUPTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_TRANSPORT_FAILURE;
        break;
    case SCSI_HOST_TARGET_FAILURE:
        req->resp.cmd.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        break;
    case SCSI_HOST_RESERVATION_ERROR:
        req->resp.cmd.response = VIRTIO_SCSI_S_NEXUS_FAILURE;
        break;
    case SCSI_HOST_ALLOCATION_FAILURE:
    case SCSI_HOST_MEDIUM_ERROR:
    case SCSI_HOST_ERROR:
    default:
        req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
        break;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_command_complete(SCSIRequest *r, size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = r->status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}
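/*
 * Sense bytes are written directly into resp_iov just after the fixed-size
 * VirtIOSCSICmdResp header (the in-buffer was sized for
 * sizeof(VirtIOSCSICmdResp) + sense_size by virtio_scsi_parse_req()).
 * virtio_scsi_complete_cmd_req() then shrinks resp_size back to the fixed
 * header, so the final header copy in virtio_scsi_complete_req() does not
 * overwrite the sense data.
 */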

static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                 uint8_t *buf, size_t buf_len,
                                 void *hba_private)
{
    VirtIOSCSIReq *req = hba_private;

    if (cmd->len == 0) {
        cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
        memcpy(cmd->buf, buf, cmd->len);
    }

    /* Extract the direction and mode directly from the request, for
     * host device passthrough.
     */
    cmd->xfer = req->qsgl.size;
    cmd->mode = req->mode;
    return 0;
}

static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    return &req->qsgl;
}

static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (!req) {
        return;
    }
    if (qatomic_read(&req->dev->resetting)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
    } else {
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}

static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = &s->parent_obj;
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            virtio_scsi_fail_cmd_req(req);
            return -ENOTSUP;
        } else {
            virtio_scsi_bad_req(req);
            return -EINVAL;
        }
    }
    trace_virtio_scsi_cmd_req(virtio_scsi_get_lun(req->req.cmd.lun),
                              req->req.cmd.tag, req->req.cmd.cdb[0]);

    d = virtio_scsi_device_get(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return -ENOENT;
    }
    virtio_scsi_ctx_check(s, d);
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, vs->cdb_size, req);

    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        object_unref(OBJECT(d));
        return -ENOBUFS;
    }
    scsi_req_ref(req->sreq);
    blk_io_plug();
    object_unref(OBJECT(d));
    return 0;
}

static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;
    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    blk_io_unplug();
    scsi_req_unref(sreq);
}

static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    int ret = 0;
    bool suppress_notifications = virtio_queue_get_notification(vq);

    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_scsi_pop_req(s, vq))) {
            ret = virtio_scsi_handle_cmd_req_prepare(s, req);
            if (!ret) {
                QTAILQ_INSERT_TAIL(&reqs, req, next);
            } else if (ret == -EINVAL) {
                /* The device is broken and shouldn't process any request */
                while (!QTAILQ_EMPTY(&reqs)) {
                    req = QTAILQ_FIRST(&reqs);
                    QTAILQ_REMOVE(&reqs, req, next);
                    blk_io_unplug();
                    scsi_req_unref(req->sreq);
                    virtqueue_detach_element(req->vq, &req->elem, 0);
                    virtio_scsi_free_req(req);
                }
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (ret != -EINVAL && !virtio_queue_empty(vq));

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
}
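/*
 * virtio_scsi_handle_cmd_vq() above uses a pop/prepare/submit split: requests
 * are popped with guest notifications suppressed and parked on a local list,
 * then submitted to the SCSI layer only once the virtqueue has been drained.
 * blk_io_plug() is called once per prepared request and matched by
 * blk_io_unplug() at submission (or when unwinding on error), so the block
 * layer can batch the resulting I/O.
 */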

static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    /* use non-QOM casts in the data path */
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_acquire(s);
    virtio_scsi_handle_cmd_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    virtio_stl_p(vdev, &scsiconf->seg_max,
                 s->conf.seg_max_adjust ? s->conf.virtqueue_size - 2 : 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}
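/*
 * seg_max example: with the default virtqueue_size of 256 and
 * seg_max_adjust=on (both defaults come from virtio_scsi_properties below),
 * the guest sees seg_max = 256 - 2 = 254; with seg_max_adjust=off the legacy
 * value 128 - 2 = 126 is reported instead, presumably keeping two
 * descriptors spare for the request and response headers.
 */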

static void virtio_scsi_set_config(VirtIODevice *vdev,
                                   const uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
        (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
        virtio_error(vdev,
                     "bad data written to virtio-scsi configuration space");
        return;
    }

    vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
    vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
}

static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
                                         uint64_t requested_features,
                                         Error **errp)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    /* First, sync all virtio-scsi features supported by the host */
    requested_features |= s->host_features;
    return requested_features;
}

static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    assert(!s->dataplane_started);

    virtio_scsi_reset_tmf_bh(s);

    qatomic_inc(&s->resetting);
    bus_cold_reset(BUS(&s->bus));
    qatomic_dec(&s->resetting);

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
    s->events_dropped = false;
}

typedef struct {
    uint32_t event;
    uint32_t reason;
    union {
        /* Used by messages specific to a device */
        struct {
            uint32_t id;
            uint32_t lun;
        } address;
    };
} VirtIOSCSIEventInfo;

static void virtio_scsi_push_event(VirtIOSCSI *s,
                                   const VirtIOSCSIEventInfo *info)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint32_t event = info->event;
    uint32_t reason = info->reason;

    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    req = virtio_scsi_pop_req(s, vs->event_vq);
    if (!req) {
        s->events_dropped = true;
        return;
    }

    if (s->events_dropped) {
        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
        s->events_dropped = false;
    }

    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req(req);
        return;
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (event != VIRTIO_SCSI_T_EVENTS_MISSED) {
        evt->lun[0] = 1;
        evt->lun[1] = info->address.id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS.  */
        if (info->address.lun >= 256) {
            evt->lun[2] = (info->address.lun >> 8) | 0x40;
        }
        evt->lun[3] = info->address.lun & 0xFF;
    }
    trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);

    virtio_scsi_complete_req(req);
}

static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    if (s->events_dropped) {
        VirtIOSCSIEventInfo info = {
            .event = VIRTIO_SCSI_T_NO_EVENT,
        };
        virtio_scsi_push_event(s, &info);
    }
}

static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_acquire(s);
    virtio_scsi_handle_event_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
        dev->type != TYPE_ROM) {
        VirtIOSCSIEventInfo info = {
            .event   = VIRTIO_SCSI_T_PARAM_CHANGE,
            .reason  = sense.asc | (sense.ascq << 8),
            .address = {
                .id  = dev->id,
                .lun = dev->lun,
            },
        };

        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, &info);
        virtio_scsi_release(s);
    }
}

static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    SCSIDevice *sd = SCSI_DEVICE(dev);
    sd->hba_supports_iothread = true;
}

static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *old_context;
    int ret;

    if (s->ctx && !s->dataplane_fenced) {
        if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            return;
        }
        old_context = blk_get_aio_context(sd->conf.blk);
        aio_context_acquire(old_context);
        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
        aio_context_release(old_context);
        if (ret < 0) {
            return;
        }
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        VirtIOSCSIEventInfo info = {
            .event   = VIRTIO_SCSI_T_TRANSPORT_RESET,
            .reason  = VIRTIO_SCSI_EVT_RESET_RESCAN,
            .address = {
                .id  = sd->id,
                .lun = sd->lun,
            },
        };

        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, &info);
        scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
        virtio_scsi_release(s);
    }
}

static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    VirtIOSCSIEventInfo info = {
        .event   = VIRTIO_SCSI_T_TRANSPORT_RESET,
        .reason  = VIRTIO_SCSI_EVT_RESET_REMOVED,
        .address = {
            .id  = sd->id,
            .lun = sd->lun,
        },
    };

    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);

    if (s->ctx) {
        virtio_scsi_acquire(s);
        /* If other users keep the BlockBackend in the iothread, that's ok */
        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
        virtio_scsi_release(s);
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, &info);
        scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
        virtio_scsi_release(s);
    }
}

/* Suspend virtqueue ioeventfd processing during drain */
static void virtio_scsi_drained_begin(SCSIBus *bus)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
                            s->parent_obj.conf.num_queues;

    /*
     * Drain is called when stopping dataplane but the host notifier has
     * already been detached. Detaching multiple times is a no-op if nothing
     * else is monitoring the same file descriptor, but avoid it just in
     * case.
     *
     * Also, don't detach if dataplane has not even been started yet because
     * the host notifier isn't attached.
     */
    if (s->dataplane_stopping || !s->dataplane_started) {
        return;
    }

    for (uint32_t i = 0; i < total_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_detach_host_notifier(vq, s->ctx);
    }
}

/* Resume virtqueue ioeventfd processing after drain */
static void virtio_scsi_drained_end(SCSIBus *bus)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
                            s->parent_obj.conf.num_queues;

    /*
     * Drain is called when stopping dataplane. Keep the host notifier detached
     * so it's not left dangling after dataplane is stopped.
     *
     * Also, don't attach if dataplane has not even been started yet. We're not
     * ready.
     */
    if (s->dataplane_stopping || !s->dataplane_started) {
        return;
    }

    for (uint32_t i = 0; i < total_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_attach_host_notifier(vq, s->ctx);
    }
}

static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .fail = virtio_scsi_command_failed,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
    .drained_begin = virtio_scsi_drained_begin,
    .drained_end = virtio_scsi_drained_end,
};

void virtio_scsi_common_realize(DeviceState *dev,
                                VirtIOHandleOutput ctrl,
                                VirtIOHandleOutput evt,
                                VirtIOHandleOutput cmd,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, VIRTIO_ID_SCSI, sizeof(VirtIOSCSIConfig));

    if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) {
        s->conf.num_queues = 1;
    }
    if (s->conf.num_queues == 0 ||
            s->conf.num_queues > VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                         "must be a positive integer less than %d.",
                   s->conf.num_queues,
                   VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED);
        virtio_cleanup(vdev);
        return;
    }
    if (s->conf.virtqueue_size <= 2) {
        error_setg(errp, "invalid virtqueue_size property (= %" PRIu32 "), "
                   "must be > 2", s->conf.virtqueue_size);
        return;
    }
    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
    s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl);
    s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd);
    }
}
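/*
 * Together with the two fixed virtqueues created above (ctrl and event),
 * this gives VIRTIO_SCSI_VQ_NUM_FIXED + num_queues virtqueues in total,
 * which matches the bound checked against VIRTIO_QUEUE_MAX as well as the
 * queue indexing used by virtio_scsi_save_request() and the
 * drained_begin/end hooks.
 */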

static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    Error *err = NULL;

    QTAILQ_INIT(&s->tmf_bh_list);

    virtio_scsi_common_realize(dev,
                               virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd,
                               &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_init_named(&s->bus, sizeof(s->bus), dev,
                       &virtio_scsi_scsi_info, vdev->bus_name);
    /* Override the default SCSI bus hotplug handler with virtio-scsi's own */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));

    virtio_scsi_dataplane_setup(s, errp);
}

void virtio_scsi_common_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_delete_queue(vs->ctrl_vq);
    virtio_delete_queue(vs->event_vq);
    for (i = 0; i < vs->conf.num_queues; i++) {
        virtio_delete_queue(vs->cmd_vqs[i]);
    }
    g_free(vs->cmd_vqs);
    virtio_cleanup(vdev);
}

static void virtio_scsi_device_unrealize(DeviceState *dev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(dev);

    virtio_scsi_reset_tmf_bh(s);

    qbus_set_hotplug_handler(BUS(&s->bus), NULL);
    virtio_scsi_common_unrealize(dev);
}

static Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
                       VIRTIO_SCSI_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
                                         parent_obj.conf.virtqueue_size, 256),
    DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI,
                      parent_obj.conf.seg_max_adjust, true),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
                                                  0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                                                  128),
    DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
                                           VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
                                                VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
                     TYPE_IOTHREAD, IOThread *),
    DEFINE_PROP_END_OF_LIST(),
};
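/*
 * For illustration (assuming the usual virtio-scsi-pci frontend, which lives
 * outside this file), these properties are typically set on the command
 * line, e.g.:
 *
 *   -object iothread,id=iothread0 \
 *   -device virtio-scsi-pci,num_queues=4,iothread=iothread0
 */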

static const VMStateDescription vmstate_virtio_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    vdc->get_config = virtio_scsi_get_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    device_class_set_props(dc, virtio_scsi_properties);
    dc->vmsd = &vmstate_virtio_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    vdc->start_ioeventfd = virtio_scsi_dataplane_start;
    vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
    hc->pre_plug = virtio_scsi_pre_hotplug;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}

static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};

static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .class_init = virtio_scsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)