qemu/hw/virtio/virtio-mmio.c
/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-mmio.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "trace.h"

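/*
 * ioeventfd support: when KVM can back event notifiers with eventfds,
 * a 32-bit guest write of queue index n to the QUEUE_NOTIFY register is
 * matched in the kernel and delivered straight to that queue's host
 * notifier, without going through the MMIO dispatch in
 * virtio_mmio_write().
 */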
static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    return kvm_eventfds_enabled();
}

static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}

static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

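/*
 * Guest read of a transport register.  Accesses at or above
 * VIRTIO_MMIO_CONFIG index into the backend device's config space and
 * may be 1, 2 or 4 bytes wide; all other registers must be read with
 * 32-bit accesses.
 */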
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            if (proxy->legacy) {
                return VIRT_VERSION_LEGACY;
            } else {
                return VIRT_VERSION;
            }
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        if (proxy->legacy) {
            return VIRT_VERSION_LEGACY;
        } else {
            return VIRT_VERSION;
        }
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        if (proxy->legacy) {
            if (proxy->host_features_sel) {
                return 0;
            } else {
                return vdev->host_features;
            }
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
            return (vdev->host_features & ~vdc->legacy_features)
                >> (32 * proxy->host_features_sel);
        }
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return proxy->vqs[vdev->queue_sel].enabled;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return atomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_CONFIG_GENERATION:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return vdev->generation;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
    case VIRTIO_MMIO_QUEUE_USED_LOW:
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    }
    return 0;
}

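/*
 * Guest write to a transport register.  As on the read side, accesses
 * at or above VIRTIO_MMIO_CONFIG are routed to the device's config
 * space; everything else must be a 32-bit access.  Legacy-only and
 * modern-only registers are rejected with a LOG_GUEST_ERROR when the
 * transport is in the wrong mode.
 */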
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        if (value) {
            proxy->host_features_sel = 1;
        } else {
            proxy->host_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        if (proxy->legacy) {
            if (proxy->guest_features_sel) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: attempt to write guest features with "
                              "guest_features_sel > 0 in legacy mode\n",
                              __func__);
            } else {
                virtio_set_features(vdev, value);
            }
        } else {
            proxy->guest_features[proxy->guest_features_sel] = value;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        if (value) {
            proxy->guest_features_sel = 1;
        } else {
            proxy->guest_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);

        if (proxy->legacy) {
            virtio_queue_update_rings(vdev, vdev->queue_sel);
        } else {
            proxy->vqs[vdev->queue_sel].num = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
            virtio_set_features(vdev,
                                ((uint64_t)proxy->guest_features[1]) << 32 |
                                proxy->guest_features[0]);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[1] = value;
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
    case VIRTIO_MMIO_CONFIG_GENERATION:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to read-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
    }
}

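/*
 * The legacy (pre-1.0 spec) transport uses the guest's native
 * endianness for its registers; the modern transport is always
 * little-endian, as virtio 1.0 requires.
 */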
static const MemoryRegionOps virtio_legacy_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (atomic_read(&vdev->isr) != 0);
    trace_virtio_mmio_setting_irq(level);
    qemu_set_irq(proxy->irq, level);
}

static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}

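/*
 * Extra migration state for the modern (non-legacy) transport: the
 * per-queue shadow registers (size, ready flag and ring addresses) and
 * the 64-bit driver feature latch.  Legacy transports carry none of
 * this; see virtio_mmio_has_extra_state() below.
 */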
static const VMStateDescription vmstate_virtio_mmio_queue_state = {
    .name = "virtio_mmio/queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOMMIOQueue),
        VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio_state_sub = {
    .name = "virtio_mmio/state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_mmio_queue_state,
                             VirtIOMMIOQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio = {
    .name = "virtio_mmio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_virtio_mmio_state_sub,
        NULL
    }
};

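/*
 * The extra state above is only exchanged when the transport runs in
 * modern mode; virtio_mmio_has_extra_state() tells the virtio core
 * whether these hooks should be used at all.
 */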
static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL);
}

static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1);
}

static bool virtio_mmio_has_extra_state(DeviceState *opaque)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return !proxy->legacy;
}

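/*
 * Reset the transport along with the device behind it: stop any
 * ioeventfd handlers, reset the virtio device, and clear the proxy's
 * register latches.  The modern per-queue shadow state only exists
 * when !legacy, hence the conditional below.
 */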
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    int i;

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;

    if (!proxy->legacy) {
        proxy->guest_features[0] = proxy->guest_features[1] = 0;

        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].enabled = 0;
            proxy->vqs[i].num = 0;
            proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
            proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
            proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
        }
    }
}

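/*
 * Guest notifier setup: each virtqueue gets an EventNotifier whose
 * handler raises the transport interrupt, so an out-of-process data
 * plane such as vhost can signal the guest without going through the
 * normal virtio notification path.
 */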
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

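/*
 * Called by the virtio core when a backend device is being plugged into
 * this bus, before feature negotiation: a modern transport advertises
 * VIRTIO_F_VERSION_1 so the driver can negotiate the 1.0 feature set.
 */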
static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->legacy) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* virtio-mmio device */

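/*
 * Transport properties.  "force-legacy" selects the pre-1.0 register
 * layout (still the default); "format_transport_address" controls
 * whether the MMIO base address is appended to the qdev path (see
 * virtio_mmio_bus_get_dev_path() below).  As a rough usage sketch, a
 * modern transport can be requested for all virtio-mmio devices via a
 * -global option, e.g.:
 *
 *   qemu-system-aarch64 -M virt -global virtio-mmio.force-legacy=false ...
 *
 * (illustrative command line; the machine type and remaining options
 * depend on the setup).
 */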
static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
    DEFINE_PROP_END_OF_LIST(),
};

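/*
 * Realize: create the virtio-mmio bus that the backend device will sit
 * on, wire up the interrupt line, and register a 0x200-byte MMIO window
 * using the MemoryRegionOps appropriate for the chosen mode.
 */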
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    if (proxy->legacy) {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_legacy_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    } else {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    }
    sysbus_init_mmio(sbd, &proxy->iomem);
}

static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_mmio_properties;
}

static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};

/* virtio-mmio-bus. */

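/*
 * Compute the qdev path for a device sitting on this transport.  The
 * path is used, among other things, to identify devices during
 * migration, so appending the MMIO base address gives each transport a
 * stable, unique identifier.
 */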
static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    SysBusDevice *proxy_sbd;
    char *path;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy);
    assert(proxy_sbd->num_mmio == 1);
    assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               proxy_sbd->mmio[0].addr);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               proxy_sbd->mmio[0].addr);
    }
    g_free(proxy_path);
    return path;
}

static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->save_extra_state = virtio_mmio_save_extra_state;
    k->load_extra_state = virtio_mmio_load_extra_state;
    k->has_extra_state = virtio_mmio_has_extra_state;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->pre_plugged = virtio_mmio_pre_plugged;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}

static const TypeInfo virtio_mmio_bus_info = {
    .name          = TYPE_VIRTIO_MMIO_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init    = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)