qemu/hw/virtio/virtio-iommu.c
/*
 * virtio-iommu device
 *
 * Copyright (c) 2020 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/iov.h"
#include "qemu-common.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio.h"
#include "sysemu/kvm.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"

#include "standard-headers/linux/virtio_ids.h"

#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci.h"

/* Max size */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256
#define VIOMMU_PROBE_SIZE 512

typedef struct VirtIOIOMMUDomain {
    uint32_t id;
    GTree *mappings;
    QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list;
} VirtIOIOMMUDomain;

typedef struct VirtIOIOMMUEndpoint {
    uint32_t id;
    VirtIOIOMMUDomain *domain;
    IOMMUMemoryRegion *iommu_mr;
    QLIST_ENTRY(VirtIOIOMMUEndpoint) next;
} VirtIOIOMMUEndpoint;

typedef struct VirtIOIOMMUInterval {
    uint64_t low;
    uint64_t high;
} VirtIOIOMMUInterval;

typedef struct VirtIOIOMMUMapping {
    uint64_t phys_addr;
    uint32_t flags;
} VirtIOIOMMUMapping;

static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev)
{
    return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
}

/**
 * The bus number is used for lookup when SID based operations occur.
 * In that case we lazily populate the IOMMUPciBus array from the bus hash
 * table. At the time the IOMMUPciBus is created (iommu_find_add_as), the bus
 * numbers may not yet be initialized.
 */
static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
{
    IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];

    if (!iommu_pci_bus) {
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
            if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
                s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
                return iommu_pci_bus;
            }
        }
        return NULL;
    }
    return iommu_pci_bus;
}

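/* Return the IOMMU memory region of the endpoint with stream ID @sid */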
static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
{
    uint8_t bus_n, devfn;
    IOMMUPciBus *iommu_pci_bus;
    IOMMUDevice *dev;

    bus_n = PCI_BUS_NUM(sid);
    iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
    if (iommu_pci_bus) {
        devfn = sid & (PCI_DEVFN_MAX - 1);
        dev = iommu_pci_bus->pbdev[devfn];
        if (dev) {
            return &dev->iommu_mr;
        }
    }
    return NULL;
}

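/*
 * GTree comparator for mapping intervals: overlapping intervals compare
 * equal, so a lookup on any address inside a mapping finds that mapping.
 */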
static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    VirtIOIOMMUInterval *inta = (VirtIOIOMMUInterval *)a;
    VirtIOIOMMUInterval *intb = (VirtIOIOMMUInterval *)b;

    if (inta->high < intb->low) {
        return -1;
    } else if (intb->high < inta->low) {
        return 1;
    } else {
        return 0;
    }
}

static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                    hwaddr virt_end, hwaddr paddr,
                                    uint32_t flags)
{
    IOMMUTLBEvent event;
    IOMMUAccessFlags perm = IOMMU_ACCESS_FLAG(flags & VIRTIO_IOMMU_MAP_F_READ,
                                              flags & VIRTIO_IOMMU_MAP_F_WRITE);

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_MAP) ||
        (flags & VIRTIO_IOMMU_MAP_F_MMIO) || !perm) {
        return;
    }

    trace_virtio_iommu_notify_map(mr->parent_obj.name, virt_start, virt_end,
                                  paddr, perm);

    event.type = IOMMU_NOTIFIER_MAP;
    event.entry.target_as = &address_space_memory;
    event.entry.addr_mask = virt_end - virt_start;
    event.entry.iova = virt_start;
    event.entry.perm = perm;
    event.entry.translated_addr = paddr;

    memory_region_notify_iommu(mr, 0, event);
}

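/*
 * Signal an UNMAP to the registered notifiers. Notifier entries require
 * power-of-two aligned ranges, so [virt_start, virt_end] is split into
 * the largest aligned chunks that fit.
 */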
static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                      hwaddr virt_end)
{
    IOMMUTLBEvent event;
    uint64_t delta = virt_end - virt_start;

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) {
        return;
    }

    trace_virtio_iommu_notify_unmap(mr->parent_obj.name, virt_start, virt_end);

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.perm = IOMMU_NONE;
    event.entry.translated_addr = 0;
    event.entry.addr_mask = delta;
    event.entry.iova = virt_start;

    if (delta == UINT64_MAX) {
        memory_region_notify_iommu(mr, 0, event);
    }

    while (virt_start != virt_end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64);

        event.entry.addr_mask = mask;
        event.entry.iova = virt_start;
        memory_region_notify_iommu(mr, 0, event);
        virt_start += mask + 1;
    }
}

static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value,
                                             gpointer data)
{
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_unmap(mr, interval->low, interval->high);

    return false;
}

static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value,
                                           gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);

    return false;
}

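/*
 * Detach the endpoint from its domain: an UNMAP notification is sent for
 * each domain mapping so that notifiers drop the corresponding translations.
 */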
static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
{
    VirtIOIOMMUDomain *domain = ep->domain;

    if (!ep->domain) {
        return;
    }
    g_tree_foreach(domain->mappings, virtio_iommu_notify_unmap_cb,
                   ep->iommu_mr);
    QLIST_REMOVE(ep, next);
    ep->domain = NULL;
}

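/* Return the endpoint for @ep_id, allocating it if the SID is known */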
static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
                                                      uint32_t ep_id)
{
    VirtIOIOMMUEndpoint *ep;
    IOMMUMemoryRegion *mr;

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (ep) {
        return ep;
    }
    mr = virtio_iommu_mr(s, ep_id);
    if (!mr) {
        return NULL;
    }
    ep = g_malloc0(sizeof(*ep));
    ep->id = ep_id;
    ep->iommu_mr = mr;
    trace_virtio_iommu_get_endpoint(ep_id);
    g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
    return ep;
}

static void virtio_iommu_put_endpoint(gpointer data)
{
    VirtIOIOMMUEndpoint *ep = (VirtIOIOMMUEndpoint *)data;

    if (ep->domain) {
        virtio_iommu_detach_endpoint_from_domain(ep);
    }

    trace_virtio_iommu_put_endpoint(ep->id);
    g_free(ep);
}

static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s,
                                                  uint32_t domain_id)
{
    VirtIOIOMMUDomain *domain;

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (domain) {
        return domain;
    }
    domain = g_malloc0(sizeof(*domain));
    domain->id = domain_id;
    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, (GDestroyNotify)g_free,
                                       (GDestroyNotify)g_free);
    g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain);
    QLIST_INIT(&domain->endpoint_list);
    trace_virtio_iommu_get_domain(domain_id);
    return domain;
}

static void virtio_iommu_put_domain(gpointer data)
{
    VirtIOIOMMUDomain *domain = (VirtIOIOMMUDomain *)data;
    VirtIOIOMMUEndpoint *iter, *tmp;

    QLIST_FOREACH_SAFE(iter, &domain->endpoint_list, next, tmp) {
        virtio_iommu_detach_endpoint_from_domain(iter);
    }
    g_tree_destroy(domain->mappings);
    trace_virtio_iommu_put_domain(domain->id);
    g_free(domain);
}

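/*
 * pci_setup_iommu() callback: return the AddressSpace used for DMA by
 * @devfn on @bus, creating the per-device IOMMU memory region on first use.
 */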
static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
                                              int devfn)
{
    VirtIOIOMMU *s = opaque;
    IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
    static uint32_t mr_index;
    IOMMUDevice *sdev;

    if (!sbus) {
        sbus = g_malloc0(sizeof(IOMMUPciBus) +
                         sizeof(IOMMUDevice *) * PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->as_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        char *name = g_strdup_printf("%s-%d-%d",
                                     TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                     mr_index++, devfn);
        sdev = sbus->pbdev[devfn] = g_malloc0(sizeof(IOMMUDevice));

        sdev->viommu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        trace_virtio_iommu_init_iommu_mr(name);

        memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
                                 TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                 OBJECT(s), name,
                                 UINT64_MAX);
        address_space_init(&sdev->as,
                           MEMORY_REGION(&sdev->iommu_mr), TYPE_VIRTIO_IOMMU);
        g_free(name);
    }
    return &sdev->as;
}

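/* VIRTIO_IOMMU_T_ATTACH: attach the endpoint to the requested domain */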
static int virtio_iommu_attach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_attach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_attach(domain_id, ep_id);

    ep = virtio_iommu_get_endpoint(s, ep_id);
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (ep->domain) {
        VirtIOIOMMUDomain *previous_domain = ep->domain;
        /*
         * the device is already attached to a domain,
         * detach it first
         */
        virtio_iommu_detach_endpoint_from_domain(ep);
        if (QLIST_EMPTY(&previous_domain->endpoint_list)) {
            g_tree_remove(s->domains, GUINT_TO_POINTER(previous_domain->id));
        }
    }

    domain = virtio_iommu_get_domain(s, domain_id);
    QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);

    ep->domain = domain;

    /* Replay domain mappings on the associated memory region */
    g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb,
                   ep->iommu_mr);

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_detach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_detach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_detach(domain_id, ep_id);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    domain = ep->domain;

    if (!domain || domain->id != domain_id) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    virtio_iommu_detach_endpoint_from_domain(ep);

    if (QLIST_EMPTY(&domain->endpoint_list)) {
        g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id));
    }
    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_map(VirtIOIOMMU *s,
                            struct virtio_iommu_req_map *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t phys_start = le64_to_cpu(req->phys_start);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUInterval *interval;
    VirtIOIOMMUMapping *mapping;
    VirtIOIOMMUEndpoint *ep;

    if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    interval = g_malloc0(sizeof(*interval));

    interval->low = virt_start;
    interval->high = virt_end;

    mapping = g_tree_lookup(domain->mappings, (gpointer)interval);
    if (mapping) {
        g_free(interval);
        return VIRTIO_IOMMU_S_INVAL;
    }

    trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags);

    mapping = g_malloc0(sizeof(*mapping));
    mapping->phys_addr = phys_start;
    mapping->flags = flags;

    g_tree_insert(domain->mappings, interval, mapping);

    QLIST_FOREACH(ep, &domain->endpoint_list, next) {
        virtio_iommu_notify_map(ep->iommu_mr, virt_start, virt_end, phys_start,
                                flags);
    }

    return VIRTIO_IOMMU_S_OK;
}

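/*
 * VIRTIO_IOMMU_T_UNMAP: remove the mappings fully contained in
 * [virt_start, virt_end]; a partially overlapping mapping makes the
 * request fail with VIRTIO_IOMMU_S_RANGE.
 */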
static int virtio_iommu_unmap(VirtIOIOMMU *s,
                              struct virtio_iommu_req_unmap *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    VirtIOIOMMUMapping *iter_val;
    VirtIOIOMMUInterval interval, *iter_key;
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;
    int ret = VIRTIO_IOMMU_S_OK;

    trace_virtio_iommu_unmap(domain_id, virt_start, virt_end);

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }
    interval.low = virt_start;
    interval.high = virt_end;

    while (g_tree_lookup_extended(domain->mappings, &interval,
                                  (void **)&iter_key, (void **)&iter_val)) {
        uint64_t current_low = iter_key->low;
        uint64_t current_high = iter_key->high;

        if (interval.low <= current_low && interval.high >= current_high) {
            QLIST_FOREACH(ep, &domain->endpoint_list, next) {
                virtio_iommu_notify_unmap(ep->iommu_mr, current_low,
                                          current_high);
            }
            g_tree_remove(domain->mappings, iter_key);
            trace_virtio_iommu_unmap_done(domain_id, current_low, current_high);
        } else {
            ret = VIRTIO_IOMMU_S_RANGE;
            break;
        }
    }
    return ret;
}

static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
                                               uint8_t *buf, size_t free)
{
    struct virtio_iommu_probe_resv_mem prop = {};
    size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
    int i;

    total = size * s->nb_reserved_regions;

    if (total > free) {
        return -ENOSPC;
    }

    for (i = 0; i < s->nb_reserved_regions; i++) {
        unsigned subtype = s->reserved_regions[i].type;

        assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED ||
               subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI);
        prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
        prop.head.length = cpu_to_le16(length);
        prop.subtype = subtype;
        prop.start = cpu_to_le64(s->reserved_regions[i].low);
        prop.end = cpu_to_le64(s->reserved_regions[i].high);

        memcpy(buf, &prop, size);

        trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
                                              prop.start, prop.end);
        buf += size;
    }
    return total;
}

/**
 * virtio_iommu_probe - Fill the probe request buffer with
 * the properties the device is able to return
 */
static int virtio_iommu_probe(VirtIOIOMMU *s,
                              struct virtio_iommu_req_probe *req,
                              uint8_t *buf)
{
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    size_t free = VIOMMU_PROBE_SIZE;
    ssize_t count;

    if (!virtio_iommu_mr(s, ep_id)) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
    if (count < 0) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    buf += count;
    free -= count;

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_iov_to_req(struct iovec *iov,
                                   unsigned int iov_cnt,
                                   void *req, size_t req_sz)
{
    size_t sz, payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail);

    sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);
    if (unlikely(sz != payload_sz)) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    return 0;
}

#define virtio_iommu_handle_req(__req)                                  \
static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s,                \
                                         struct iovec *iov,             \
                                         unsigned int iov_cnt)          \
{                                                                       \
    struct virtio_iommu_req_ ## __req req;                              \
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \
                                                                        \
    return ret ? ret : virtio_iommu_ ## __req(s, &req);                 \
}

virtio_iommu_handle_req(attach)
virtio_iommu_handle_req(detach)
virtio_iommu_handle_req(map)
virtio_iommu_handle_req(unmap)

static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
                                     struct iovec *iov,
                                     unsigned int iov_cnt,
                                     uint8_t *buf)
{
    struct virtio_iommu_req_probe req;
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));

    return ret ? ret : virtio_iommu_probe(s, &req, buf);
}

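/* Request virtqueue handler: pop, decode and answer guest requests */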
static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_req_head head;
    struct virtio_iommu_req_tail tail = {};
    size_t output_size = sizeof(tail), sz;
    VirtQueueElement *elem;
    unsigned int iov_cnt;
    struct iovec *iov;
    void *buf = NULL;

    for (;;) {
        /* Per-request reset: a probe request enlarges output_size */
        output_size = sizeof(tail);

        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        if (iov_size(elem->in_sg, elem->in_num) < sizeof(tail) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(head)) {
            virtio_error(vdev, "virtio-iommu bad head/tail size");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov = elem->out_sg;
        sz = iov_to_buf(iov, iov_cnt, 0, &head, sizeof(head));
        if (unlikely(sz != sizeof(head))) {
            tail.status = VIRTIO_IOMMU_S_DEVERR;
            goto out;
        }
        qemu_mutex_lock(&s->mutex);
        switch (head.type) {
        case VIRTIO_IOMMU_T_ATTACH:
            tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_DETACH:
            tail.status = virtio_iommu_handle_detach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_MAP:
            tail.status = virtio_iommu_handle_map(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_UNMAP:
            tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_PROBE:
        {
            struct virtio_iommu_req_tail *ptail;

            output_size = s->config.probe_size + sizeof(tail);
            buf = g_malloc0(output_size);

            ptail = (struct virtio_iommu_req_tail *)
                        (buf + s->config.probe_size);
            ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
            break;
        }
        default:
            tail.status = VIRTIO_IOMMU_S_UNSUPP;
        }
        qemu_mutex_unlock(&s->mutex);

out:
        sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                          buf ? buf : &tail, output_size);
        assert(sz == output_size);

        virtqueue_push(vq, elem, sz);
        virtio_notify(vdev, vq);
        g_free(elem);
        g_free(buf);
        buf = NULL; /* avoid reusing the freed probe buffer */
    }
}

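/* Report a translation fault to the guest through the event virtqueue */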
static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
                                      int flags, uint32_t endpoint,
                                      uint64_t address)
{
    VirtIODevice *vdev = &viommu->parent_obj;
    VirtQueue *vq = viommu->event_vq;
    struct virtio_iommu_fault fault;
    VirtQueueElement *elem;
    size_t sz;

    memset(&fault, 0, sizeof(fault));
    fault.reason = reason;
    fault.flags = cpu_to_le32(flags);
    fault.endpoint = cpu_to_le32(endpoint);
    fault.address = cpu_to_le64(address);

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));

    if (!elem) {
        error_report_once(
            "no buffer available in event queue to report event");
        return;
    }

    if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
        virtio_error(vdev, "error buffer of wrong size");
        virtqueue_detach_element(vq, elem, 0);
        g_free(elem);
        return;
    }

    sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                      &fault, sizeof(fault));
    assert(sz == sizeof(fault));

    trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
    virtqueue_push(vq, elem, sz);
    virtio_notify(vdev, vq);
    g_free(elem);
}

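/*
 * IOMMU translate callback: walk the mappings of the domain the endpoint
 * behind @mr is attached to. Reserved regions, bypass and permission
 * checks are handled here; faults are pushed on the event virtqueue.
 */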
static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                            IOMMUAccessFlags flag,
                                            int iommu_idx)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMUInterval interval, *mapping_key;
    VirtIOIOMMUMapping *mapping_value;
    VirtIOIOMMU *s = sdev->viommu;
    bool read_fault, write_fault;
    VirtIOIOMMUEndpoint *ep;
    uint32_t sid, flags;
    bool bypass_allowed;
    bool found;
    int i;

    interval.low = addr;
    interval.high = addr + 1;

    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = (1 << ctz32(s->config.page_size_mask)) - 1,
        .perm = IOMMU_NONE,
    };

    bypass_allowed = virtio_vdev_has_feature(&s->parent_obj,
                                             VIRTIO_IOMMU_F_BYPASS);

    sid = virtio_iommu_get_bdf(sdev);

    trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
    qemu_mutex_lock(&s->mutex);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep) {
        if (!bypass_allowed) {
            error_report_once("%s sid=%d is not known!!", __func__, sid);
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    }

    for (i = 0; i < s->nb_reserved_regions; i++) {
        ReservedRegion *reg = &s->reserved_regions[i];

        if (addr >= reg->low && addr <= reg->high) {
            switch (reg->type) {
            case VIRTIO_IOMMU_RESV_MEM_T_MSI:
                entry.perm = flag;
                break;
            case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
            default:
                virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                          VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                          sid, addr);
                break;
            }
            goto unlock;
        }
    }

    if (!ep->domain) {
        if (!bypass_allowed) {
            error_report_once("%s %02x:%02x.%01x not attached to any domain",
                              __func__, PCI_BUS_NUM(sid),
                              PCI_SLOT(sid), PCI_FUNC(sid));
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    }

    found = g_tree_lookup_extended(ep->domain->mappings, (gpointer)(&interval),
                                   (void **)&mapping_key,
                                   (void **)&mapping_value);
    if (!found) {
        error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d",
                          __func__, addr, sid);
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }

    read_fault = (flag & IOMMU_RO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ);
    write_fault = (flag & IOMMU_WO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE);

    flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0;
    flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0;
    if (flags) {
        error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d",
                          __func__, addr, flag, mapping_value->flags);
        flags |= VIRTIO_IOMMU_FAULT_F_ADDRESS;
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  flags | VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }
    entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr;
    entry.perm = flag;
    trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid);

unlock:
    qemu_mutex_unlock(&s->mutex);
    return entry;
}

static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *config = &dev->config;

    trace_virtio_iommu_get_config(config->page_size_mask,
                                  config->input_range.start,
                                  config->input_range.end,
                                  config->domain_range.end,
                                  config->probe_size);
    memcpy(config_data, &dev->config, sizeof(struct virtio_iommu_config));
}

static void virtio_iommu_set_config(VirtIODevice *vdev,
                                    const uint8_t *config_data)
{
    struct virtio_iommu_config config;

    memcpy(&config, config_data, sizeof(struct virtio_iommu_config));
    trace_virtio_iommu_set_config(config.page_size_mask,
                                  config.input_range.start,
                                  config.input_range.end,
                                  config.domain_range.end,
                                  config.probe_size);
}

static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f,
                                          Error **errp)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);

    f |= dev->features;
    trace_virtio_iommu_get_features(f);
    return f;
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    guint ua = GPOINTER_TO_UINT(a);
    guint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static gboolean virtio_iommu_remap(gpointer key, gpointer value, gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    trace_virtio_iommu_remap(mr->parent_obj.name, interval->low, interval->high,
                             mapping->phys_addr);
    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);
    return false;
}

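/*
 * IOMMU replay callback: re-send MAP notifications for all the mappings of
 * the domain the endpoint is attached to, e.g. when a new notifier is
 * registered.
 */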
static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint32_t sid;
    VirtIOIOMMUEndpoint *ep;

    sid = virtio_iommu_get_bdf(sdev);

    qemu_mutex_lock(&s->mutex);

    if (!s->endpoints) {
        goto unlock;
    }

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep || !ep->domain) {
        goto unlock;
    }

    g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr);

unlock:
    qemu_mutex_unlock(&s->mutex);
}

static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new,
                                            Error **errp)
{
    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "Virtio-iommu does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_add(iommu_mr->parent_obj.name);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_del(iommu_mr->parent_obj.name);
    }
    return 0;
}

/*
 * The default mask (TARGET_PAGE_MASK) is the smallest supported guest granule,
 * for example 0xfffffffffffff000. When an assigned device has page size
 * restrictions due to the hardware IOMMU configuration, apply this restriction
 * to the mask.
 */
static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr,
                                           uint64_t new_mask,
                                           Error **errp)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint64_t cur_mask = s->config.page_size_mask;

    trace_virtio_iommu_set_page_size_mask(mr->parent_obj.name, cur_mask,
                                          new_mask);

    if ((cur_mask & new_mask) == 0) {
        error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
                   " is incompatible with mask 0x%"PRIx64, cur_mask, new_mask);
        return -1;
    }

    /*
     * After the machine is finalized, we can't change the mask anymore. If by
     * chance the hotplugged device supports the same granule, we can still
     * accept it. Having different masks is possible but the guest will use
     * sub-optimal block sizes, so warn about it.
     */
    if (phase_check(PHASE_MACHINE_READY)) {
        int new_granule = ctz64(new_mask);
        int cur_granule = ctz64(cur_mask);

        if (new_granule != cur_granule) {
            error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
                       " is incompatible with mask 0x%"PRIx64, cur_mask,
                       new_mask);
            return -1;
        } else if (new_mask != cur_mask) {
            warn_report("virtio-iommu page mask 0x%"PRIx64
                        " does not match 0x%"PRIx64, cur_mask, new_mask);
        }
        return 0;
    }

    s->config.page_size_mask &= new_mask;
    return 0;
}

static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    virtio_init(vdev, "virtio-iommu", VIRTIO_ID_IOMMU,
                sizeof(struct virtio_iommu_config));

    memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num));

    s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE,
                                 virtio_iommu_handle_command);
    s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);

    s->config.page_size_mask = TARGET_PAGE_MASK;
    s->config.input_range.end = -1UL;
    s->config.domain_range.end = 32;
    s->config.probe_size = VIOMMU_PROBE_SIZE;

    virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
    virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
    virtio_add_feature(&s->features, VIRTIO_F_VERSION_1);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);

    qemu_mutex_init(&s->mutex);

    s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, virtio_iommu_find_add_as, s);
    } else {
        error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!");
    }
}

static void virtio_iommu_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    g_hash_table_destroy(s->as_by_busptr);
    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }

    virtio_delete_queue(s->req_vq);
    virtio_delete_queue(s->event_vq);
    virtio_cleanup(vdev);
}

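/* Device reset: drop all domains and endpoints and recreate empty trees */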
static void virtio_iommu_device_reset(VirtIODevice *vdev)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);

    trace_virtio_iommu_device_reset();

    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }
    s->domains = g_tree_new_full((GCompareDataFunc)int_cmp,
                                 NULL, NULL, virtio_iommu_put_domain);
    s->endpoints = g_tree_new_full((GCompareDataFunc)int_cmp,
                                   NULL, NULL, virtio_iommu_put_endpoint);
}

static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
{
    trace_virtio_iommu_device_status(status);
}

static void virtio_iommu_instance_init(Object *obj)
{
}

#define VMSTATE_INTERVAL                               \
{                                                      \
    .name = "interval",                                \
    .version_id = 1,                                   \
    .minimum_version_id = 1,                           \
    .fields = (VMStateField[]) {                       \
        VMSTATE_UINT64(low, VirtIOIOMMUInterval),      \
        VMSTATE_UINT64(high, VirtIOIOMMUInterval),     \
        VMSTATE_END_OF_LIST()                          \
    }                                                  \
}

#define VMSTATE_MAPPING                               \
{                                                     \
    .name = "mapping",                                \
    .version_id = 1,                                  \
    .minimum_version_id = 1,                          \
    .fields = (VMStateField[]) {                      \
        VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\
        VMSTATE_UINT32(flags, VirtIOIOMMUMapping),    \
        VMSTATE_END_OF_LIST()                         \
    },                                                \
}

static const VMStateDescription vmstate_interval_mapping[2] = {
    VMSTATE_MAPPING,   /* value */
    VMSTATE_INTERVAL   /* key   */
};

static int domain_preload(void *opaque)
{
    VirtIOIOMMUDomain *domain = opaque;

    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, g_free, g_free);
    return 0;
}

static const VMStateDescription vmstate_endpoint = {
    .name = "endpoint",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUEndpoint),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_domain = {
    .name = "domain",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = domain_preload,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUDomain),
        VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1,
                        vmstate_interval_mapping,
                        VirtIOIOMMUInterval, VirtIOIOMMUMapping),
        VMSTATE_QLIST_V(endpoint_list, VirtIOIOMMUDomain, 1,
                        vmstate_endpoint, VirtIOIOMMUEndpoint, next),
        VMSTATE_END_OF_LIST()
    }
};

static gboolean reconstruct_endpoints(gpointer key, gpointer value,
                                      gpointer data)
{
    VirtIOIOMMU *s = (VirtIOIOMMU *)data;
    VirtIOIOMMUDomain *d = (VirtIOIOMMUDomain *)value;
    VirtIOIOMMUEndpoint *iter;
    IOMMUMemoryRegion *mr;

    QLIST_FOREACH(iter, &d->endpoint_list, next) {
        mr = virtio_iommu_mr(s, iter->id);
        assert(mr);

        iter->domain = d;
        iter->iommu_mr = mr;
        g_tree_insert(s->endpoints, GUINT_TO_POINTER(iter->id), iter);
    }
    return false; /* continue the domain traversal */
}

static int iommu_post_load(void *opaque, int version_id)
{
    VirtIOIOMMU *s = opaque;

    g_tree_foreach(s->domains, reconstruct_endpoints, s);
    return 0;
}

static const VMStateDescription vmstate_virtio_iommu_device = {
    .name = "virtio-iommu-device",
    .minimum_version_id = 1,
    .version_id = 1,
    .post_load = iommu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 1,
                                   &vmstate_domain, VirtIOIOMMUDomain),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_iommu = {
    .name = "virtio-iommu",
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_iommu_properties[] = {
    DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus, "PCI", PCIBus *),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_iommu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_iommu_properties);
    dc->vmsd = &vmstate_virtio_iommu;

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_iommu_device_realize;
    vdc->unrealize = virtio_iommu_device_unrealize;
    vdc->reset = virtio_iommu_device_reset;
    vdc->get_config = virtio_iommu_get_config;
    vdc->set_config = virtio_iommu_set_config;
    vdc->get_features = virtio_iommu_get_features;
    vdc->set_status = virtio_iommu_set_status;
    vdc->vmsd = &vmstate_virtio_iommu_device;
}

static void virtio_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = virtio_iommu_translate;
    imrc->replay = virtio_iommu_replay;
    imrc->notify_flag_changed = virtio_iommu_notify_flag_changed;
    imrc->iommu_set_page_size_mask = virtio_iommu_set_page_size_mask;
}

static const TypeInfo virtio_iommu_info = {
    .name = TYPE_VIRTIO_IOMMU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOIOMMU),
    .instance_init = virtio_iommu_instance_init,
    .class_init = virtio_iommu_class_init,
};

static const TypeInfo virtio_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_VIRTIO_IOMMU_MEMORY_REGION,
    .class_init = virtio_iommu_memory_region_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_iommu_info);
    type_register_static(&virtio_iommu_memory_region_info);
}

type_init(virtio_register_types)