qemu/hw/misc/ivshmem.c
/*
 * Inter-VM Shared Memory PCI device.
 *
 * Author:
 *      Cam Macdonell <cam@cs.ualberta.ca>
 *
 * Based On: cirrus_vga.c
 *          Copyright (c) 2004 Fabrice Bellard
 *          Copyright (c) 2004 Makoto Suzuki (suzu)
 *
 *      and rtl8139.c
 *          Copyright (c) 2006 Igor Kovalenko
 *
 * This code is licensed under the GNU GPL v2.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
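
/*
 * Illustrative invocations (added commentary, not from the original file;
 * the option names are taken from ivshmem_properties at the end of this
 * file, but the exact command-line syntax may differ for your QEMU build):
 *
 *   Stand-alone, backed directly by a POSIX shared memory object:
 *     qemu-system-x86_64 ... -device ivshmem,shm=my_shmobj,size=4M
 *
 *   Connected to an ivshmem server over a UNIX socket chardev, with
 *   MSI-X and ioeventfd-based doorbells:
 *     qemu-system-x86_64 ... \
 *       -chardev socket,path=/tmp/ivshmem_socket,id=ivsh \
 *       -device ivshmem,chardev=ivsh,size=4M,msi=on,ioeventfd=on
 */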
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/pci/msix.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "qapi/qmp/qerror.h"
#include "qemu/event_notifier.h"
#include "qemu/fifo8.h"
#include "sysemu/char.h"

#include <sys/mman.h>
#include <sys/types.h>
#include <limits.h>
#define PCI_VENDOR_ID_IVSHMEM   PCI_VENDOR_ID_REDHAT_QUMRANET
#define PCI_DEVICE_ID_IVSHMEM   0x1110

#define IVSHMEM_IOEVENTFD   0
#define IVSHMEM_MSI     1

#define IVSHMEM_PEER    0
#define IVSHMEM_MASTER  1

#define IVSHMEM_REG_BAR_SIZE 0x100

//#define DEBUG_IVSHMEM
#ifdef DEBUG_IVSHMEM
#define IVSHMEM_DPRINTF(fmt, ...)        \
    do {printf("IVSHMEM: " fmt, ## __VA_ARGS__); } while (0)
#else
#define IVSHMEM_DPRINTF(fmt, ...)
#endif

#define TYPE_IVSHMEM "ivshmem"
#define IVSHMEM(obj) \
    OBJECT_CHECK(IVShmemState, (obj), TYPE_IVSHMEM)

typedef struct Peer {
    int nb_eventfds;
    EventNotifier *eventfds;
} Peer;

typedef struct EventfdEntry {
    PCIDevice *pdev;
    int vector;
} EventfdEntry;

typedef struct IVShmemState {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    uint32_t intrmask;
    uint32_t intrstatus;
    uint32_t doorbell;

    CharDriverState **eventfd_chr;
    CharDriverState *server_chr;
    Fifo8 incoming_fifo;
    MemoryRegion ivshmem_mmio;

    /* We might need to register the BAR before we actually have the memory.
     * So prepare a container MemoryRegion for the BAR immediately and
     * add a subregion when we have the memory.
     */
    MemoryRegion bar;
    MemoryRegion ivshmem;
    uint64_t ivshmem_size; /* size of shared memory region */
    uint32_t ivshmem_attr;
    uint32_t ivshmem_64bit;
    int shm_fd; /* shared memory file descriptor */

    Peer *peers;
    int nb_peers; /* how many guests we have space for */
    int max_peer; /* maximum numbered peer */

    int vm_id;
    uint32_t vectors;
    uint32_t features;
    EventfdEntry *eventfd_table;

    Error *migration_blocker;

    char * shmobj;
    char * sizearg;
    char * role;
    int role_val;   /* scalar to avoid multiple string comparisons */
} IVShmemState;

/* registers for the Inter-VM shared memory device */
enum ivshmem_registers {
    INTRMASK = 0,
    INTRSTATUS = 4,
    IVPOSITION = 8,
    DOORBELL = 12,
};
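
/*
 * Added commentary (derived from ivshmem_io_write() below): a guest rings a
 * peer's doorbell by writing a 32-bit value to the DOORBELL register in
 * BAR0, with the destination peer ID in the upper 16 bits and the interrupt
 * vector number in the low bits, e.g.
 *
 *     *(volatile uint32_t *)(bar0 + DOORBELL) = (peer_id << 16) | vector;
 *
 * (guest-side sketch only; "bar0" stands for wherever the guest has mapped
 * the register BAR).
 */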

static inline uint32_t ivshmem_has_feature(IVShmemState *ivs,
                                                    unsigned int feature) {
    return (ivs->features & (1 << feature));
}

static inline bool is_power_of_two(uint64_t x) {
    return (x & (x - 1)) == 0;
}
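
/* Added note: (x & (x - 1)) == 0 also holds for x == 0, so a zero size
 * would pass this check; ivshmem_get_size() below is the only caller. */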

/* accessing registers - based on rtl8139 */
static void ivshmem_update_irq(IVShmemState *s, int val)
{
    PCIDevice *d = PCI_DEVICE(s);
    int isr;
    isr = (s->intrstatus & s->intrmask) & 0xffffffff;

    /* don't print ISR resets */
    if (isr) {
        IVSHMEM_DPRINTF("Set IRQ to %d (%04x %04x)\n",
                        isr ? 1 : 0, s->intrstatus, s->intrmask);
    }

    pci_set_irq(d, (isr != 0));
}

static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val)
{
    IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val);

    s->intrmask = val;

    ivshmem_update_irq(s, val);
}

static uint32_t ivshmem_IntrMask_read(IVShmemState *s)
{
    uint32_t ret = s->intrmask;

    IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret);

    return ret;
}

static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val)
{
    IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val);

    s->intrstatus = val;

    ivshmem_update_irq(s, val);
}

static uint32_t ivshmem_IntrStatus_read(IVShmemState *s)
{
    uint32_t ret = s->intrstatus;

    /* reading ISR clears all interrupts */
    s->intrstatus = 0;

    ivshmem_update_irq(s, 0);

    return ret;
}

static void ivshmem_io_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    IVShmemState *s = opaque;

    uint16_t dest = val >> 16;
    uint16_t vector = val & 0xff;

    addr &= 0xfc;

    IVSHMEM_DPRINTF("writing to addr " TARGET_FMT_plx "\n", addr);
    switch (addr)
    {
        case INTRMASK:
            ivshmem_IntrMask_write(s, val);
            break;

        case INTRSTATUS:
            ivshmem_IntrStatus_write(s, val);
            break;

        case DOORBELL:
            /* check that dest VM ID is reasonable */
            if (dest > s->max_peer) {
                IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
                break;
            }

            /* check doorbell range */
            if (vector < s->peers[dest].nb_eventfds) {
                IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
                event_notifier_set(&s->peers[dest].eventfds[vector]);
            }
            break;
        default:
            IVSHMEM_DPRINTF("Invalid VM Doorbell VM %d\n", dest);
    }
}

static uint64_t ivshmem_io_read(void *opaque, hwaddr addr,
                                unsigned size)
{

    IVShmemState *s = opaque;
    uint32_t ret;

    switch (addr)
    {
        case INTRMASK:
            ret = ivshmem_IntrMask_read(s);
            break;

        case INTRSTATUS:
            ret = ivshmem_IntrStatus_read(s);
            break;

        case IVPOSITION:
            /* return my VM ID if the memory is mapped */
            if (s->shm_fd > 0) {
                ret = s->vm_id;
            } else {
                ret = -1;
            }
            break;

        default:
            IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr);
            ret = 0;
    }

    return ret;
}

static const MemoryRegionOps ivshmem_mmio_ops = {
    .read = ivshmem_io_read,
    .write = ivshmem_io_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
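
/*
 * Added commentary: ivshmem_receive() is the chardev read handler used for
 * one of our own doorbell eventfds when MSI-X is not in use; the received
 * byte is written into IntrStatus, which raises the legacy interrupt
 * (subject to IntrMask).  ivshmem_can_receive() and ivshmem_event() are
 * shared by all chardev handlers in this file, and fake_irqfd() below is
 * the MSI-X counterpart that notifies the vector recorded in EventfdEntry.
 */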
static void ivshmem_receive(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;

    ivshmem_IntrStatus_write(s, *buf);

    IVSHMEM_DPRINTF("ivshmem_receive 0x%02x\n", *buf);
}

static int ivshmem_can_receive(void * opaque)
{
    return 8;
}

static void ivshmem_event(void *opaque, int event)
{
    IVSHMEM_DPRINTF("ivshmem_event %d\n", event);
}

static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {

    EventfdEntry *entry = opaque;
    PCIDevice *pdev = entry->pdev;

    IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, entry->vector);
    msix_notify(pdev, entry->vector);
}
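
/*
 * Added commentary: wrap one of our own doorbell eventfds in a chardev so
 * that a peer kicking it lands either in fake_irqfd() (MSI-X: notify the
 * matching vector) or in ivshmem_receive() (legacy interrupt path).
 */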
static CharDriverState* create_eventfd_chr_device(void * opaque, EventNotifier *n,
                                                  int vector)
{
    /* create an event character device based on the passed eventfd */
    IVShmemState *s = opaque;
    CharDriverState * chr;
    int eventfd = event_notifier_get_fd(n);

    chr = qemu_chr_open_eventfd(eventfd);

    if (chr == NULL) {
        error_report("creating eventfd for eventfd %d failed", eventfd);
        exit(1);
    }
    qemu_chr_fe_claim_no_fail(chr);

    /* if MSI is supported we need multiple interrupts */
    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        s->eventfd_table[vector].pdev = PCI_DEVICE(s);
        s->eventfd_table[vector].vector = vector;

        qemu_chr_add_handlers(chr, ivshmem_can_receive, fake_irqfd,
                      ivshmem_event, &s->eventfd_table[vector]);
    } else {
        qemu_chr_add_handlers(chr, ivshmem_can_receive, ivshmem_receive,
                      ivshmem_event, s);
    }

    return chr;

}

static int check_shm_size(IVShmemState *s, int fd) {
    /* check that the guest isn't going to try and map more memory than the
     * shared object has allocated; return -1 to indicate an error */

    struct stat buf;

    if (fstat(fd, &buf) < 0) {
        error_report("exiting: fstat on fd %d failed: %s",
                     fd, strerror(errno));
        return -1;
    }

    if (s->ivshmem_size > buf.st_size) {
        error_report("Requested memory size greater"
                     " than shared object size (%" PRIu64 " > %" PRIu64")",
                     s->ivshmem_size, (uint64_t)buf.st_size);
        return -1;
    } else {
        return 0;
    }
}

/* create the shared memory BAR when we are not using the server, so we can
 * create the BAR and map the memory immediately */
static void create_shared_memory_BAR(IVShmemState *s, int fd) {

    void * ptr;

    s->shm_fd = fd;

    ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

    memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s), "ivshmem.bar2",
                               s->ivshmem_size, ptr);
    vmstate_register_ram(&s->ivshmem, DEVICE(s));
    memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

    /* region for shared memory */
    pci_register_bar(PCI_DEVICE(s), 2, s->ivshmem_attr, &s->bar);
}

static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
{
    memory_region_add_eventfd(&s->ivshmem_mmio,
                              DOORBELL,
                              4,
                              true,
                              (posn << 16) | i,
                              &s->peers[posn].eventfds[i]);
}

static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
{
    memory_region_del_eventfd(&s->ivshmem_mmio,
                              DOORBELL,
                              4,
                              true,
                              (posn << 16) | i,
                              &s->peers[posn].eventfds[i]);
}

static void close_guest_eventfds(IVShmemState *s, int posn)
{
    int i, guest_curr_max;

    if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        return;
    }
    if (posn < 0 || posn >= s->nb_peers) {
        return;
    }

    guest_curr_max = s->peers[posn].nb_eventfds;

    memory_region_transaction_begin();
    for (i = 0; i < guest_curr_max; i++) {
        ivshmem_del_eventfd(s, posn, i);
    }
    memory_region_transaction_commit();
    for (i = 0; i < guest_curr_max; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}

/* this function increases the dynamic storage needed to store data about
 * other guests */
static int increase_dynamic_storage(IVShmemState *s, int new_min_size)
{

    int j, old_nb_alloc;

    /* check for integer overflow */
    if (new_min_size >= INT_MAX / sizeof(Peer) - 1 || new_min_size <= 0) {
        return -1;
    }

    old_nb_alloc = s->nb_peers;

    if (new_min_size >= s->nb_peers) {
        /* +1 because #new_min_size is used as last array index */
        s->nb_peers = new_min_size + 1;
    } else {
        return 0;
    }

    IVSHMEM_DPRINTF("bumping storage to %d guests\n", s->nb_peers);
    s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));

    /* zero out new pointers */
    for (j = old_nb_alloc; j < s->nb_peers; j++) {
        s->peers[j].eventfds = NULL;
        s->peers[j].nb_eventfds = 0;
    }

    return 0;
}
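
/*
 * Added commentary: read handler for the ivshmem server chardev.  The
 * server sends a long (the peer position), optionally accompanied by a
 * file descriptor passed over the UNIX socket:
 *   - posn == -1 with an fd:  the shared memory region itself
 *   - posn >= 0 with an fd:   the next doorbell eventfd for that peer
 *   - posn >= 0 without fd:   either our own ID (first time that position
 *                             is seen) or notice that a peer has gone away
 * Short reads are reassembled through s->incoming_fifo.
 */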
static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    int incoming_fd, tmp_fd;
    int guest_max_eventfd;
    long incoming_posn;

    if (fifo8_is_empty(&s->incoming_fifo) && size == sizeof(incoming_posn)) {
        memcpy(&incoming_posn, buf, size);
    } else {
        const uint8_t *p;
        uint32_t num;

        IVSHMEM_DPRINTF("short read of %d bytes\n", size);
        /* push at most the number of bytes still needed to complete a long */
        num = MIN(size, sizeof(long) - fifo8_num_used(&s->incoming_fifo));
        fifo8_push_all(&s->incoming_fifo, buf, num);
        if (fifo8_num_used(&s->incoming_fifo) < sizeof(incoming_posn)) {
            return;
        }
        size -= num;
        buf += num;
        p = fifo8_pop_buf(&s->incoming_fifo, sizeof(incoming_posn), &num);
        g_assert(num == sizeof(incoming_posn));
        memcpy(&incoming_posn, p, sizeof(incoming_posn));
        if (size > 0) {
            fifo8_push_all(&s->incoming_fifo, buf, size);
        }
    }

    if (incoming_posn < -1) {
        IVSHMEM_DPRINTF("invalid incoming_posn %ld\n", incoming_posn);
        return;
    }

    /* pick off s->server_chr->msgfd and store it, posn should accompany msg */
    tmp_fd = qemu_chr_fe_get_msgfd(s->server_chr);
    IVSHMEM_DPRINTF("posn is %ld, fd is %d\n", incoming_posn, tmp_fd);

    /* make sure we have enough space for this guest */
    if (incoming_posn >= s->nb_peers) {
        if (increase_dynamic_storage(s, incoming_posn) < 0) {
            error_report("increase_dynamic_storage() failed");
            if (tmp_fd != -1) {
                close(tmp_fd);
            }
            return;
        }
    }

    if (tmp_fd == -1) {
        /* if posn is non-negative and unseen before then this is our posn */
        if ((incoming_posn >= 0) &&
                            (s->peers[incoming_posn].eventfds == NULL)) {
            /* receive our posn */
            s->vm_id = incoming_posn;
            return;
        } else {
            /* otherwise an fd == -1 means an existing guest has gone away */
            IVSHMEM_DPRINTF("posn %ld has gone away\n", incoming_posn);
            close_guest_eventfds(s, incoming_posn);
            return;
        }
    }

    /* because of the implementation of get_msgfd, we need a dup */
    incoming_fd = dup(tmp_fd);

    if (incoming_fd == -1) {
        error_report("could not allocate file descriptor %s", strerror(errno));
        close(tmp_fd);
        return;
    }

    /* if the position is -1, then it's shared memory region fd */
    if (incoming_posn == -1) {

        void * map_ptr;

        s->max_peer = 0;

        if (check_shm_size(s, incoming_fd) == -1) {
            exit(1);
        }

        /* mmap the region and map into the BAR2 */
        map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
                                                            incoming_fd, 0);
        memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
                                   "ivshmem.bar2", s->ivshmem_size, map_ptr);
        vmstate_register_ram(&s->ivshmem, DEVICE(s));

        IVSHMEM_DPRINTF("guest h/w addr = %p, size = %" PRIu64 "\n",
                        map_ptr, s->ivshmem_size);

        memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

        /* only store the fd if it is successfully mapped */
        s->shm_fd = incoming_fd;

        return;
    }

    /* each peer has an array of eventfds, one per MSI vector; keep track of
     * how many this peer has handed us so far */
    guest_max_eventfd = s->peers[incoming_posn].nb_eventfds;

    if (guest_max_eventfd == 0) {
        /* one eventfd per MSI vector */
        s->peers[incoming_posn].eventfds = g_new(EventNotifier, s->vectors);
    }

    /* this is an eventfd for a particular guest VM */
    IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
                    guest_max_eventfd, incoming_fd);
    event_notifier_init_fd(&s->peers[incoming_posn].eventfds[guest_max_eventfd],
                           incoming_fd);

    /* increment count for particular guest */
    s->peers[incoming_posn].nb_eventfds++;

    /* keep track of the maximum VM ID */
    if (incoming_posn > s->max_peer) {
        s->max_peer = incoming_posn;
    }

    if (incoming_posn == s->vm_id) {
        s->eventfd_chr[guest_max_eventfd] = create_eventfd_chr_device(s,
                   &s->peers[s->vm_id].eventfds[guest_max_eventfd],
                   guest_max_eventfd);
    }

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        ivshmem_add_eventfd(s, incoming_posn, guest_max_eventfd);
    }
}

/* Select the MSI-X vectors used by device.
 * ivshmem maps events to vectors statically, so
 * we just enable all vectors on init and after reset. */
static void ivshmem_use_msix(IVShmemState * s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int i;

    if (!msix_present(d)) {
        return;
    }

    for (i = 0; i < s->vectors; i++) {
        msix_vector_use(d, i);
    }
}

static void ivshmem_reset(DeviceState *d)
{
    IVShmemState *s = IVSHMEM(d);

    s->intrstatus = 0;
    ivshmem_use_msix(s);
}
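
/*
 * Added commentary: parse the "size" property.  A bare number or an
 * 'M'/'m' suffix means mebibytes, 'G'/'g' means gibibytes, and the result
 * must be a power of two because it is used as a BAR size.
 */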
static uint64_t ivshmem_get_size(IVShmemState * s) {

    uint64_t value;
    char *ptr;

    value = strtoull(s->sizearg, &ptr, 10);
    switch (*ptr) {
        case 0: case 'M': case 'm':
            value <<= 20;
            break;
        case 'G': case 'g':
            value <<= 30;
            break;
        default:
            error_report("invalid ram size: %s", s->sizearg);
            exit(1);
    }

    /* BARs must be a power of 2 */
    if (!is_power_of_two(value)) {
        error_report("size must be power of 2");
        exit(1);
    }

    return value;
}

static void ivshmem_setup_msi(IVShmemState * s)
{
    if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1)) {
        IVSHMEM_DPRINTF("msix initialization failed\n");
        exit(1);
    }

    IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);

    /* allocate QEMU char devices for receiving interrupts */
    s->eventfd_table = g_malloc0(s->vectors * sizeof(EventfdEntry));

    ivshmem_use_msix(s);
}
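
/*
 * Added commentary: hand-rolled migration (register_savevm) rather than
 * VMState.  Only a 'master' device may be migrated; MSI-X state is saved
 * via msix_save(), otherwise the raw IntrStatus/IntrMask pair is streamed.
 */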
static void ivshmem_save(QEMUFile* f, void *opaque)
{
    IVShmemState *proxy = opaque;
    PCIDevice *pci_dev = PCI_DEVICE(proxy);

    IVSHMEM_DPRINTF("ivshmem_save\n");
    pci_device_save(pci_dev, f);

    if (ivshmem_has_feature(proxy, IVSHMEM_MSI)) {
        msix_save(pci_dev, f);
    } else {
        qemu_put_be32(f, proxy->intrstatus);
        qemu_put_be32(f, proxy->intrmask);
    }

}

static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
{
    IVSHMEM_DPRINTF("ivshmem_load\n");

    IVShmemState *proxy = opaque;
    PCIDevice *pci_dev = PCI_DEVICE(proxy);
    int ret;

    if (version_id > 0) {
        return -EINVAL;
    }

    if (proxy->role_val == IVSHMEM_PEER) {
        error_report("'peer' devices are not migratable");
        return -EINVAL;
    }

    ret = pci_device_load(pci_dev, f);
    if (ret) {
        return ret;
    }

    if (ivshmem_has_feature(proxy, IVSHMEM_MSI)) {
        msix_load(pci_dev, f);
        ivshmem_use_msix(proxy);
    } else {
        proxy->intrstatus = qemu_get_be32(f);
        proxy->intrmask = qemu_get_be32(f);
    }

    return 0;
}
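
/* Added commentary: config writes are forwarded to the MSI-X code so that
 * changes to the MSI-X enable/mask bits take effect. */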
static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
                                 uint32_t val, int len)
{
    pci_default_write_config(pci_dev, address, val, len);
    msix_write_config(pci_dev, address, val, len);
}
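
/*
 * Added commentary: device realization.  Two modes are supported:
 *   - "chardev": connect to the ivshmem server over a UNIX socket and
 *     receive the shared memory fd and doorbell eventfds asynchronously
 *     via ivshmem_read();
 *   - "shm": shm_open() a POSIX shared memory object and map it directly.
 * "role=peer" disables migration by registering a migration blocker.
 */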
static int pci_ivshmem_init(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM(dev);
    uint8_t *pci_conf;

    if (s->sizearg == NULL) {
        s->ivshmem_size = 4 << 20; /* 4 MB default */
    } else {
        s->ivshmem_size = ivshmem_get_size(s);
    }

    fifo8_create(&s->incoming_fifo, sizeof(long));

    register_savevm(DEVICE(dev), "ivshmem", 0, 0, ivshmem_save, ivshmem_load,
                                                                        dev);

    /* IRQFD requires MSI */
    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
        !ivshmem_has_feature(s, IVSHMEM_MSI)) {
        error_report("ioeventfd/irqfd requires MSI");
        exit(1);
    }

    /* check that role is reasonable */
    if (s->role) {
        if (strncmp(s->role, "peer", 5) == 0) {
            s->role_val = IVSHMEM_PEER;
        } else if (strncmp(s->role, "master", 7) == 0) {
            s->role_val = IVSHMEM_MASTER;
        } else {
            error_report("'role' must be 'peer' or 'master'");
            exit(1);
        }
    } else {
        s->role_val = IVSHMEM_MASTER; /* default */
    }

    if (s->role_val == IVSHMEM_PEER) {
        error_setg(&s->migration_blocker,
                   "Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
        migrate_add_blocker(s->migration_blocker);
    }

    pci_conf = dev->config;
    pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;

    pci_config_set_interrupt_pin(pci_conf, 1);

    s->shm_fd = 0;

    memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s,
                          "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);

    /* region for registers */
    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &s->ivshmem_mmio);

    memory_region_init(&s->bar, OBJECT(s), "ivshmem-bar2-container", s->ivshmem_size);
    s->ivshmem_attr = PCI_BASE_ADDRESS_SPACE_MEMORY |
        PCI_BASE_ADDRESS_MEM_PREFETCH;
    if (s->ivshmem_64bit) {
        s->ivshmem_attr |= PCI_BASE_ADDRESS_MEM_TYPE_64;
    }

    if ((s->server_chr != NULL) &&
                        (strncmp(s->server_chr->filename, "unix:", 5) == 0)) {
        /* if we get a UNIX socket as the parameter we will talk
         * to the ivshmem server to receive the memory region */

        if (s->shmobj != NULL) {
            error_report("WARNING: do not specify both 'chardev' "
                         "and 'shm' with ivshmem");
        }

        IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
                        s->server_chr->filename);

        if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
            ivshmem_setup_msi(s);
        }

        /* we allocate enough space for 16 guests and grow as needed */
        s->nb_peers = 16;
        s->vm_id = -1;

        /* allocate/initialize space for interrupt handling */
        s->peers = g_malloc0(s->nb_peers * sizeof(Peer));

        pci_register_bar(dev, 2, s->ivshmem_attr, &s->bar);

        s->eventfd_chr = g_malloc0(s->vectors * sizeof(CharDriverState *));

        qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive, ivshmem_read,
                     ivshmem_event, s);
    } else {
        /* just map the file immediately, we're not using a server */
        int fd;

        if (s->shmobj == NULL) {
            error_report("Must specify 'chardev' or 'shm' to ivshmem");
            exit(1);
        }

        IVSHMEM_DPRINTF("using shm_open (shm object = %s)\n", s->shmobj);

        /* try opening with O_EXCL first; if it succeeds the object is newly
         * created and still empty */
        if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR|O_EXCL,
                        S_IRWXU|S_IRWXG|S_IRWXO)) > 0) {
            /* size the shared object to match the PCI device's memory */
            if (ftruncate(fd, s->ivshmem_size) != 0) {
                error_report("could not truncate shared file");
            }

        } else if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR,
                        S_IRWXU|S_IRWXG|S_IRWXO)) < 0) {
            error_report("could not open shared file");
            exit(1);

        }

        if (check_shm_size(s, fd) == -1) {
            exit(1);
        }

        create_shared_memory_BAR(s, fd);

    }

    dev->config_write = ivshmem_write_config;

    return 0;
}

static void pci_ivshmem_uninit(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM(dev);

    if (s->migration_blocker) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    memory_region_del_subregion(&s->bar, &s->ivshmem);
    vmstate_unregister_ram(&s->ivshmem, DEVICE(dev));
    unregister_savevm(DEVICE(dev), "ivshmem", s);
    fifo8_destroy(&s->incoming_fifo);
}

static Property ivshmem_properties[] = {
    DEFINE_PROP_CHR("chardev", IVShmemState, server_chr),
    DEFINE_PROP_STRING("size", IVShmemState, sizearg),
    DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1),
    DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD, false),
    DEFINE_PROP_BIT("msi", IVShmemState, features, IVSHMEM_MSI, true),
    DEFINE_PROP_STRING("shm", IVShmemState, shmobj),
    DEFINE_PROP_STRING("role", IVShmemState, role),
    DEFINE_PROP_UINT32("use64", IVShmemState, ivshmem_64bit, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void ivshmem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = pci_ivshmem_init;
    k->exit = pci_ivshmem_uninit;
    k->vendor_id = PCI_VENDOR_ID_IVSHMEM;
    k->device_id = PCI_DEVICE_ID_IVSHMEM;
    k->class_id = PCI_CLASS_MEMORY_RAM;
    dc->reset = ivshmem_reset;
    dc->props = ivshmem_properties;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static const TypeInfo ivshmem_info = {
    .name          = TYPE_IVSHMEM,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(IVShmemState),
    .class_init    = ivshmem_class_init,
};

static void ivshmem_register_types(void)
{
    type_register_static(&ivshmem_info);
}

type_init(ivshmem_register_types)