/* qemu/hw/virtio/virtio-mmio.c */
   1/*
   2 * Virtio MMIO bindings
   3 *
   4 * Copyright (c) 2011 Linaro Limited
   5 *
   6 * Author:
   7 *  Peter Maydell <peter.maydell@linaro.org>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License; either version 2
  11 * of the License, or (at your option) any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License along
  19 * with this program; if not, see <http://www.gnu.org/licenses/>.
  20 */
  21
  22#include "qemu/osdep.h"
  23#include "standard-headers/linux/virtio_mmio.h"
  24#include "hw/irq.h"
  25#include "hw/qdev-properties.h"
  26#include "hw/sysbus.h"
  27#include "hw/virtio/virtio.h"
  28#include "migration/qemu-file-types.h"
  29#include "qemu/host-utils.h"
  30#include "qemu/module.h"
  31#include "sysemu/kvm.h"
  32#include "hw/virtio/virtio-mmio.h"
  33#include "qemu/error-report.h"
  34#include "qemu/log.h"
  35#include "trace.h"
  36
  37static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
  38{
  39    return kvm_eventfds_enabled();
  40}
  41
  42static int virtio_mmio_ioeventfd_assign(DeviceState *d,
  43                                        EventNotifier *notifier,
  44                                        int n, bool assign)
  45{
  46    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
  47
  48    if (assign) {
  49        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
  50                                  true, n, notifier);
  51    } else {
  52        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
  53                                  true, n, notifier);
  54    }
  55    return 0;
  56}
  57
  58static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
  59{
  60    virtio_bus_start_ioeventfd(&proxy->bus);
  61}
  62
  63static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
  64{
  65    virtio_bus_stop_ioeventfd(&proxy->bus);
  66}
  67
  68static void virtio_mmio_soft_reset(VirtIOMMIOProxy *proxy)
  69{
  70    int i;
  71
  72    if (proxy->legacy) {
  73        return;
  74    }
  75
  76    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
  77        proxy->vqs[i].enabled = 0;
  78    }
  79}
  80
/*
 * Guest read of a virtio-mmio register, or — for offsets at or above
 * VIRTIO_MMIO_CONFIG — of the device-specific config space.
 *
 * Control registers only accept 32-bit accesses; config space accepts
 * 1/2/4-byte accesses.  Behaviour differs between legacy (virtio-mmio
 * version 1) and modern (version 2) transports throughout.
 */
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            if (proxy->legacy) {
                return VIRT_VERSION_LEGACY;
            } else {
                return VIRT_VERSION;
            }
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        /* Legacy and modern transports use different config accessors. */
        if (proxy->legacy) {
            switch (size) {
            case 1:
                return virtio_config_readb(vdev, offset);
            case 2:
                return virtio_config_readw(vdev, offset);
            case 4:
                return virtio_config_readl(vdev, offset);
            default:
                abort();
            }
        } else {
            switch (size) {
            case 1:
                return virtio_config_modern_readb(vdev, offset);
            case 2:
                return virtio_config_modern_readw(vdev, offset);
            case 4:
                return virtio_config_modern_readl(vdev, offset);
            default:
                abort();
            }
        }
    }
    /* All control registers below are 32 bits wide. */
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        if (proxy->legacy) {
            return VIRT_VERSION_LEGACY;
        } else {
            return VIRT_VERSION;
        }
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        if (proxy->legacy) {
            /* Legacy devices expose only the low 32 feature bits. */
            if (proxy->host_features_sel) {
                return 0;
            } else {
                return vdev->host_features;
            }
        } else {
            /* Modern transports expose 64 feature bits as two 32-bit
             * windows selected by DEVICE_FEATURES_SEL; legacy-only
             * feature bits are filtered out.
             */
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
            return (vdev->host_features & ~vdc->legacy_features)
                >> (32 * proxy->host_features_sel);
        }
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        /* An unconfigured queue reports a maximum size of zero. */
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return 0;
        }
        /* Legacy queues are located by guest page frame number. */
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return proxy->vqs[vdev->queue_sel].enabled;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return qatomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_CONFIG_GENERATION:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return vdev->generation;
    case VIRTIO_MMIO_SHM_LEN_LOW:
    case VIRTIO_MMIO_SHM_LEN_HIGH:
        /*
         * VIRTIO_MMIO_SHM_SEL is unimplemented
         * according to the linux driver, if region length is -1
         * the shared memory doesn't exist
         */
        return -1;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
    case VIRTIO_MMIO_QUEUE_USED_LOW:
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    }
    /* Not reached: every case in the switch above returns. */
    return 0;
}
 242
/*
 * Guest write to a virtio-mmio register, or — for offsets at or above
 * VIRTIO_MMIO_CONFIG — to the device-specific config space.
 *
 * Control registers only accept 32-bit accesses; config space accepts
 * 1/2/4-byte accesses.  Writes are silently ignored when no backend
 * device is plugged in.
 */
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        /* Legacy and modern transports use different config accessors. */
        if (proxy->legacy) {
            switch (size) {
            case 1:
                virtio_config_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        } else {
            switch (size) {
            case 1:
                virtio_config_modern_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_modern_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_modern_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        }
    }
    /* All control registers below are 32 bits wide. */
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        /* Clamp the selector to 0/1: features are at most 64 bits wide. */
        if (value) {
            proxy->host_features_sel = 1;
        } else {
            proxy->host_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        if (proxy->legacy) {
            if (proxy->guest_features_sel) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: attempt to write guest features with "
                              "guest_features_sel > 0 in legacy mode\n",
                              __func__);
            } else {
                virtio_set_features(vdev, value);
            }
        } else {
            /* Modern guests latch each 32-bit half here; the combined
             * value is applied when FEATURES_OK is written to STATUS.
             */
            proxy->guest_features[proxy->guest_features_sel] = value;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        /* Clamp the selector to 0/1, as for DEVICE_FEATURES_SEL. */
        if (value) {
            proxy->guest_features_sel = 1;
        } else {
            proxy->guest_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        /* Record the page size as the shift of its lowest set bit;
         * ctz32(0) == 32, which is clamped back to a shift of 0.
         */
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        /* Out-of-range queue selections are silently ignored. */
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);

        if (proxy->legacy) {
            virtio_queue_update_rings(vdev, vdev->queue_sel);
        } else {
            /* Modern transports re-apply the size when the queue is
             * marked ready (see VIRTIO_MMIO_QUEUE_READY below).
             */
            proxy->vqs[vdev->queue_sel].num = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        /* Writing PFN 0 is the legacy way to request a device reset. */
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value) {
            /* Commit the latched size and the three ring addresses
             * (each previously written as low/high 32-bit halves),
             * then mark the queue usable.
             */
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        /* Clear the acked ISR bits and recompute the irq line level. */
        qatomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        /* Stop ioeventfd handling before DRIVER_OK is dropped. */
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        /* Apply the latched 64-bit feature set once the driver signals
         * FEATURES_OK (modern transports only).
         */
        if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
            virtio_set_features(vdev,
                                ((uint64_t)proxy->guest_features[1]) << 32 |
                                proxy->guest_features[0]);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        /* Writing status 0 requests a full device reset. */
        if (vdev->status == 0) {
            virtio_reset(vdev);
            virtio_mmio_soft_reset(proxy);
        }
        break;
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[1] = value;
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
    case VIRTIO_MMIO_CONFIG_GENERATION:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to read-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
    }
}
 515
/* Legacy (virtio-mmio version 1) register bank: guest-native endianness. */
static const MemoryRegionOps virtio_legacy_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
 521
/* Modern (virtio 1.0) register bank: always little-endian. */
static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
 527
 528static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
 529{
 530    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
 531    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
 532    int level;
 533
 534    if (!vdev) {
 535        return;
 536    }
 537    level = (qatomic_read(&vdev->isr) != 0);
 538    trace_virtio_mmio_setting_irq(level);
 539    qemu_set_irq(proxy->irq, level);
 540}
 541
/*
 * Restore the transport registers written by virtio_mmio_save_config().
 * NB: the qemu_get_be32() calls must stay in exactly this order to
 * match the wire format.  Always returns 0.
 */
static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}
 551
/*
 * Save the transport registers for migration.  The field order here
 * defines the wire format consumed by virtio_mmio_load_config().
 */
static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}
 560
/* Migration layout of one VirtIOMMIOQueue: the latched queue size,
 * ready flag, and the desc/avail/used ring addresses as lo/hi pairs.
 */
static const VMStateDescription vmstate_virtio_mmio_queue_state = {
    .name = "virtio_mmio/queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOMMIOQueue),
        VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};
 574
/* Modern-transport state (latched guest features and per-queue state),
 * carried as a subsection and driven by the save/load_extra_state hooks.
 */
static const VMStateDescription vmstate_virtio_mmio_state_sub = {
    .name = "virtio_mmio/state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_mmio_queue_state,
                             VirtIOMMIOQueue),
        VMSTATE_END_OF_LIST()
    }
};
 587
 588static const VMStateDescription vmstate_virtio_mmio = {
 589    .name = "virtio_mmio",
 590    .version_id = 1,
 591    .minimum_version_id = 1,
 592    .minimum_version_id_old = 1,
 593    .fields = (VMStateField[]) {
 594        VMSTATE_END_OF_LIST()
 595    },
 596    .subsections = (const VMStateDescription * []) {
 597        &vmstate_virtio_mmio_state_sub,
 598        NULL
 599    }
 600};
 601
 602static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f)
 603{
 604    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
 605
 606    vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL);
 607}
 608
 609static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f)
 610{
 611    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
 612
 613    return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1);
 614}
 615
 616static bool virtio_mmio_has_extra_state(DeviceState *opaque)
 617{
 618    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
 619
 620    return !proxy->legacy;
 621}
 622
 623static void virtio_mmio_reset(DeviceState *d)
 624{
 625    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
 626    int i;
 627
 628    virtio_mmio_stop_ioeventfd(proxy);
 629    virtio_bus_reset(&proxy->bus);
 630    proxy->host_features_sel = 0;
 631    proxy->guest_features_sel = 0;
 632    proxy->guest_page_shift = 0;
 633
 634    if (!proxy->legacy) {
 635        proxy->guest_features[0] = proxy->guest_features[1] = 0;
 636
 637        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
 638            proxy->vqs[i].enabled = 0;
 639            proxy->vqs[i].num = 0;
 640            proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
 641            proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
 642            proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
 643        }
 644    }
 645}
 646
/*
 * (Un)wire the guest notifier for queue @n: on assign, initialise the
 * event notifier and install its fd handler; on deassign, remove the
 * handler and clean the notifier up.  @with_irqfd selects irqfd-style
 * handler wiring.  Returns 0 on success or the negative value from
 * event_notifier_init() on failure.
 */
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    /* Let the device mask/unmask its own notifications if it can. */
    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
 673
/*
 * (Un)assign guest notifiers for the first @nvqs queues.  Iteration
 * stops early at the first unconfigured queue.  If an assignment
 * fails part-way, the queues assigned so far are rolled back and the
 * error is returned.
 */
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}
 706
 707static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
 708{
 709    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
 710    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
 711
 712    if (!proxy->legacy) {
 713        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
 714    }
 715}
 716
 717/* virtio-mmio device */
 718
/* User-configurable properties of the virtio-mmio transport. */
static Property virtio_mmio_properties[] = {
    /* When true, include the transport's base address in device paths
     * ("virtio-mmio@<addr>"); see virtio_mmio_bus_get_dev_path().
     */
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    /* When true (the default), expose the legacy (virtio-mmio
     * version 1) register layout instead of the virtio 1.0 one.
     */
    DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
    DEFINE_PROP_END_OF_LIST(),
};
 725
 726static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
 727{
 728    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
 729    SysBusDevice *sbd = SYS_BUS_DEVICE(d);
 730
 731    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
 732                        d, NULL);
 733    sysbus_init_irq(sbd, &proxy->irq);
 734    if (proxy->legacy) {
 735        memory_region_init_io(&proxy->iomem, OBJECT(d),
 736                              &virtio_legacy_mem_ops, proxy,
 737                              TYPE_VIRTIO_MMIO, 0x200);
 738    } else {
 739        memory_region_init_io(&proxy->iomem, OBJECT(d),
 740                              &virtio_mem_ops, proxy,
 741                              TYPE_VIRTIO_MMIO, 0x200);
 742    }
 743    sysbus_init_mmio(sbd, &proxy->iomem);
 744}
 745
 746static void virtio_mmio_class_init(ObjectClass *klass, void *data)
 747{
 748    DeviceClass *dc = DEVICE_CLASS(klass);
 749
 750    dc->realize = virtio_mmio_realizefn;
 751    dc->reset = virtio_mmio_reset;
 752    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
 753    device_class_set_props(dc, virtio_mmio_properties);
 754}
 755
/* QOM type registration for the transport device (a sysbus device). */
static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};
 762
 763/* virtio-mmio-bus. */
 764
/*
 * Build the migration/device path for a device on this virtio-mmio
 * bus.  Depending on the "format_transport_address" property the path
 * is either the proxy's own path (delegation) or that path suffixed
 * with "virtio-mmio@<base address>".  The returned string is
 * g_malloc'd; the caller frees it.
 */
static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    char *path;
    MemoryRegionSection section;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    /* memory_region_find takes a reference on section.mr; drop it below. */
    section = memory_region_find(&virtio_mmio_proxy->iomem, 0, 0x200);
    assert(section.mr);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               section.offset_within_address_space);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               section.offset_within_address_space);
    }
    memory_region_unref(section.mr);

    g_free(proxy_path);
    return path;
}
 805
/* Wire up the VirtioBusClass hooks implemented by this transport. */
static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    /* Interrupt delivery */
    k->notify = virtio_mmio_update_irq;
    /* Migration of transport state */
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->save_extra_state = virtio_mmio_save_extra_state;
    k->load_extra_state = virtio_mmio_load_extra_state;
    k->has_extra_state = virtio_mmio_has_extra_state;
    /* Notifier / ioeventfd plumbing */
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->pre_plugged = virtio_mmio_pre_plugged;
    k->has_variable_vring_alignment = true;
    /* One backend device per transport. */
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}
 825
/* QOM type registration for the virtio-mmio bus itself. */
static const TypeInfo virtio_mmio_bus_info = {
    .name          = TYPE_VIRTIO_MMIO_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init    = virtio_mmio_bus_class_init,
};
 832
 833static void virtio_mmio_register_types(void)
 834{
 835    type_register_static(&virtio_mmio_bus_info);
 836    type_register_static(&virtio_mmio_info);
 837}
 838
 839type_init(virtio_mmio_register_types)
 840