qemu/hw/virtio/virtio-mmio.c
/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "qemu/host-utils.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/error-report.h"

/* #define DEBUG_VIRTIO_MMIO */

#ifdef DEBUG_VIRTIO_MMIO

#define DPRINTF(fmt, ...) \
do { printf("virtio_mmio: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

/* QOM macros */
/* virtio-mmio-bus */
#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
#define VIRTIO_MMIO_BUS(obj) \
        OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)

/* virtio-mmio */
#define TYPE_VIRTIO_MMIO "virtio-mmio"
#define VIRTIO_MMIO(obj) \
        OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)

/* Memory mapped register offsets */
#define VIRTIO_MMIO_MAGIC 0x0
#define VIRTIO_MMIO_VERSION 0x4
#define VIRTIO_MMIO_DEVICEID 0x8
#define VIRTIO_MMIO_VENDORID 0xc
#define VIRTIO_MMIO_HOSTFEATURES 0x10
#define VIRTIO_MMIO_HOSTFEATURESSEL 0x14
#define VIRTIO_MMIO_GUESTFEATURES 0x20
#define VIRTIO_MMIO_GUESTFEATURESSEL 0x24
#define VIRTIO_MMIO_GUESTPAGESIZE 0x28
#define VIRTIO_MMIO_QUEUESEL 0x30
#define VIRTIO_MMIO_QUEUENUMMAX 0x34
#define VIRTIO_MMIO_QUEUENUM 0x38
#define VIRTIO_MMIO_QUEUEALIGN 0x3c
#define VIRTIO_MMIO_QUEUEPFN 0x40
#define VIRTIO_MMIO_QUEUENOTIFY 0x50
#define VIRTIO_MMIO_INTERRUPTSTATUS 0x60
#define VIRTIO_MMIO_INTERRUPTACK 0x64
#define VIRTIO_MMIO_STATUS 0x70
/* Device specific config space starts here */
#define VIRTIO_MMIO_CONFIG 0x100

#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 1
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */

typedef struct {
    /* Generic */
    SysBusDevice parent_obj;
    MemoryRegion iomem;
    qemu_irq irq;
    /* Guest accessible state needing migration and reset */
    uint32_t host_features_sel;
    uint32_t guest_features_sel;
    uint32_t guest_page_shift;
    /* virtio-bus */
    VirtioBusState bus;
    bool ioeventfd_disabled;
    bool ioeventfd_started;
} VirtIOMMIOProxy;

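/* Wire up (or tear down) the host notifier for virtqueue n: the eventfd is
 * bound as an ioeventfd to a 4-byte write of the queue index at the
 * QUEUENOTIFY register, so guest kicks can be handled without taking the
 * MMIO write path.
 */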
static int virtio_mmio_set_host_notifier_internal(VirtIOMMIOProxy *proxy,
                                                  int n, bool assign,
                                                  bool set_handler)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
                                  true, n, notifier);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}

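/* Enable ioeventfd-based kicks for every populated virtqueue.  Called when
 * the driver sets DRIVER_OK; if any queue fails to get a notifier, the ones
 * already assigned are undone and kicks fall back to the userspace path.
 */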
static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int n, r;

    if (!kvm_eventfds_enabled() ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_mmio_set_host_notifier_internal(proxy, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    while (--n >= 0) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed. Falling back to userspace (slower).", __func__);
}

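/* Disable ioeventfd kicks again: called on reset, on a status write that
 * clears DRIVER_OK, and when a backend claims the host notifiers through
 * virtio_mmio_set_host_notifier().
 */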
static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    int r;
    int n;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
}

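/* Read handler for the transport's MMIO region.  Offsets at or above
 * VIRTIO_MMIO_CONFIG are forwarded to the device's config space (1, 2 and
 * 4 byte accesses allowed); everything below is a 32-bit transport register.
 */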
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_read offset 0x%x\n", (int)offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            return VIRT_VERSION;
        case VIRTIO_MMIO_VENDORID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        return VIRT_VERSION;
    case VIRTIO_MMIO_DEVICEID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDORID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_HOSTFEATURES:
        if (proxy->host_features_sel) {
            return 0;
        }
        return vdev->host_features;
    case VIRTIO_MMIO_QUEUENUMMAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUEPFN:
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        return vdev->isr;
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_HOSTFEATURESSEL:
    case VIRTIO_MMIO_GUESTFEATURES:
    case VIRTIO_MMIO_GUESTFEATURESSEL:
    case VIRTIO_MMIO_GUESTPAGESIZE:
    case VIRTIO_MMIO_QUEUESEL:
    case VIRTIO_MMIO_QUEUENUM:
    case VIRTIO_MMIO_QUEUEALIGN:
    case VIRTIO_MMIO_QUEUENOTIFY:
    case VIRTIO_MMIO_INTERRUPTACK:
        DPRINTF("read of write-only register\n");
        return 0;
    default:
        DPRINTF("bad register offset\n");
        return 0;
    }
    return 0;
}

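/* Write handler for the transport's MMIO region.  As with reads, offsets at
 * or above VIRTIO_MMIO_CONFIG go to device config space; transport registers
 * only accept 32-bit accesses.  This is the legacy (version 1) layout, so
 * the guest supplies its page size and a queue PFN rather than full ring
 * addresses.
 */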
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
            (int)offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_HOSTFEATURESSEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTFEATURES:
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_GUESTFEATURESSEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTPAGESIZE:
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
                proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUESEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUENUM:
        DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUEALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUEPFN:
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUENOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPTACK:
        vdev->isr &= ~value;
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICEID:
    case VIRTIO_MMIO_VENDORID:
    case VIRTIO_MMIO_HOSTFEATURES:
    case VIRTIO_MMIO_QUEUENUMMAX:
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        DPRINTF("write to readonly register\n");
        break;

    default:
        DPRINTF("bad register offset\n");
    }
}

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

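/* virtio-bus "notify" callback: raise or lower the transport's single
 * interrupt line so it tracks whether any bits are set in the device ISR.
 */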
static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (vdev->isr != 0);
    DPRINTF("virtio_mmio setting IRQ %d\n", level);
    qemu_set_irq(proxy->irq, level);
}

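/* Migration of the proxy's own state: the feature-select latches and the
 * guest page shift are saved and restored alongside the virtio device.
 */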
static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}

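/* Device reset: stop ioeventfd, reset the virtio device behind the bus, and
 * clear the proxy's own guest-visible latches.
 */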
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;
}

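/* Set up or tear down the guest notifier (interrupt eventfd) for a single
 * virtqueue, and let the device mask or unmask its own notifications if it
 * implements guest_notifier_mask.
 */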
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

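/* Assign or release guest notifiers for the first nvqs populated queues,
 * rolling back on failure.  irqfd is never used here (see the TODO below).
 */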
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

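/* virtio-bus callback used by backends (e.g. vhost) that want to own the
 * host notifier themselves; while assigned, the transport's own ioeventfd
 * handling is suppressed.
 */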
static int virtio_mmio_set_host_notifier(DeviceState *opaque, int n,
                                         bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers.  This makes it easy to avoid stepping on each others' toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_mmio_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc. */
    return virtio_mmio_set_host_notifier_internal(proxy, n, assign, false);
}

/* virtio-mmio device */

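/* Realize: create the virtio bus the backend device will plug into, export
 * the interrupt line, and register the 0x200-byte MMIO register window.
 */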
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy,
                          TYPE_VIRTIO_MMIO, 0x200);
    sysbus_init_mmio(sbd, &proxy->iomem);
}

static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};

/* virtio-mmio-bus. */

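/* The bus class wires the transport callbacks into the generic virtio-bus
 * layer; max_dev is 1 because each virtio-mmio transport carries exactly
 * one backend device.
 */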
static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->set_host_notifier = virtio_mmio_set_host_notifier;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
}

static const TypeInfo virtio_mmio_bus_info = {
    .name          = TYPE_VIRTIO_MMIO_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init    = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)