qemu/hw/virtio/virtio-mmio.c
/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-mmio.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "trace.h"

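/*
 * ioeventfd support: writes to the QUEUE_NOTIFY register can be bound to an
 * EventNotifier, so the backend is kicked without going through the MMIO
 * dispatch path. This is only offered when KVM eventfds are available.
 */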
static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    return kvm_eventfds_enabled();
}

static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}

static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

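/*
 * Read handler for the transport's register window. Offsets at or above
 * VIRTIO_MMIO_CONFIG are forwarded to the device's config space (1, 2 or
 * 4 byte accesses); all other registers must be read with 32-bit accesses.
 */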
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            if (proxy->legacy) {
                return VIRT_VERSION_LEGACY;
            } else {
                return VIRT_VERSION;
            }
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        if (proxy->legacy) {
            return VIRT_VERSION_LEGACY;
        } else {
            return VIRT_VERSION;
        }
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        if (proxy->legacy) {
            if (proxy->host_features_sel) {
                return 0;
            } else {
                return vdev->host_features;
            }
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
            return (vdev->host_features & ~vdc->legacy_features)
                >> (32 * proxy->host_features_sel);
        }
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return proxy->vqs[vdev->queue_sel].enabled;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return atomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_CONFIG_GENERATION:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return vdev->generation;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
    case VIRTIO_MMIO_QUEUE_USED_LOW:
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    }
    return 0;
}

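/*
 * Write handler for the transport's register window. As with reads, offsets
 * at or above VIRTIO_MMIO_CONFIG go to the device config space; everything
 * else must be a 32-bit access. Legacy-only and non-legacy-only registers
 * are rejected with a guest-error log when used in the wrong mode.
 */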
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        if (value) {
            proxy->host_features_sel = 1;
        } else {
            proxy->host_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        if (proxy->legacy) {
            if (proxy->guest_features_sel) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: attempt to write guest features with "
                              "guest_features_sel > 0 in legacy mode\n",
                              __func__);
            } else {
                virtio_set_features(vdev, value);
            }
        } else {
            proxy->guest_features[proxy->guest_features_sel] = value;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        if (value) {
            proxy->guest_features_sel = 1;
        } else {
            proxy->guest_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        if (proxy->legacy) {
            virtio_queue_set_num(vdev, vdev->queue_sel, value);
            virtio_queue_update_rings(vdev, vdev->queue_sel);
        } else {
            proxy->vqs[vdev->queue_sel].num = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
            virtio_set_features(vdev,
                                ((uint64_t)proxy->guest_features[1]) << 32 |
                                proxy->guest_features[0]);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[1] = value;
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
    case VIRTIO_MMIO_CONFIG_GENERATION:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to read-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
    }
}

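/*
 * The legacy (version 1) transport uses guest-native endianness for its
 * registers; the modern (version 2) transport is always little-endian.
 */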
static const MemoryRegionOps virtio_legacy_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

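/* Raise or lower the transport's single interrupt line from the ISR state. */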
static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (atomic_read(&vdev->isr) != 0);
    trace_virtio_mmio_setting_irq(level);
    qemu_set_irq(proxy->irq, level);
}

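/* Migration of the proxy's register-select and guest page-shift state. */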
static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}

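/*
 * Extra transport state for non-legacy proxies: the two driver feature
 * words and the per-queue state (num, enabled flag, ring addresses) are
 * migrated through a "virtio_mmio" vmstate subsection. Legacy transports
 * have no extra state (see virtio_mmio_has_extra_state below).
 */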
static const VMStateDescription vmstate_virtio_mmio_queue_state = {
    .name = "virtio_mmio/queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOMMIOQueue),
        VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio_state_sub = {
    .name = "virtio_mmio/state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_mmio_queue_state,
                             VirtIOMMIOQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio = {
    .name = "virtio_mmio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_virtio_mmio_state_sub,
        NULL
    }
};

static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL);
}

static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1);
}

static bool virtio_mmio_has_extra_state(DeviceState *opaque)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return !proxy->legacy;
}

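/* Reset the plugged device and the transport's own register state. */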
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    int i;

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;

    if (!proxy->legacy) {
        proxy->guest_features[0] = proxy->guest_features[1] = 0;

        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].enabled = 0;
            proxy->vqs[i].num = 0;
            proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
            proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
            proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
        }
    }
}

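/*
 * Guest notifiers let a backend (such as vhost) signal the guest. irqfd is
 * not wired up here (see the TODO below), so notifications are handled in
 * QEMU and delivered through the normal interrupt line.
 */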
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

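/*
 * Called before the backend is plugged into the bus: advertise
 * VIRTIO_F_VERSION_1 unless the transport is in legacy mode.
 */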
static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->legacy) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* virtio-mmio device */

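/*
 * Note that "force-legacy" defaults to true, so the transport presents the
 * pre-1.0 (version 1) virtio-mmio interface unless the property is cleared.
 */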
static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
    DEFINE_PROP_END_OF_LIST(),
};

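/*
 * Realize: create the virtio bus in place, the interrupt line, and the
 * 0x200-byte register window for this transport instance.
 */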
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    if (proxy->legacy) {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_legacy_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    } else {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    }
    sysbus_init_mmio(sbd, &proxy->iomem);
}

static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_mmio_properties;
}

static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};

/* virtio-mmio-bus. */

static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    SysBusDevice *proxy_sbd;
    char *path;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy);
    assert(proxy_sbd->num_mmio == 1);
    assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               proxy_sbd->mmio[0].addr);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               proxy_sbd->mmio[0].addr);
    }
    g_free(proxy_path);
    return path;
}

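/*
 * Each virtio-mmio transport carries exactly one virtio device, hence
 * max_dev = 1 on the bus class below.
 */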
static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->save_extra_state = virtio_mmio_save_extra_state;
    k->load_extra_state = virtio_mmio_load_extra_state;
    k->has_extra_state = virtio_mmio_has_extra_state;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->pre_plugged = virtio_mmio_pre_plugged;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}

static const TypeInfo virtio_mmio_bus_info = {
    .name          = TYPE_VIRTIO_MMIO_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init    = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)