linux/virt/kvm/coalesced_mmio.c
// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
                                   gpa_t addr, int len)
{
        /*
         * Is the access in a batchable area, i.e. is (addr, len) fully
         * included in (zone->addr, zone->size)?
         */
        if (len < 0)
                return 0;
        if (addr + len < addr)
                return 0;
        if (addr < dev->zone.addr)
                return 0;
        if (addr + len > dev->zone.addr + dev->zone.size)
                return 0;
        return 1;
}
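
/*
 * Example: for a zone at 0x1000 with size 0x100, an 8-byte write to
 * 0x10f8 is in range (0x10f8 + 8 == 0x1100, the zone end), while an
 * 8-byte write to 0x10f9 is not.  The "addr + len < addr" check rejects
 * accesses whose end would wrap around the guest physical address space.
 */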

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
        struct kvm_coalesced_mmio_ring *ring;
        unsigned int avail;

        /*
         * Can we batch the access?  "last" is the first free entry; check
         * that we don't collide with the first used entry.  There is
         * always one unused entry in the buffer, so an empty ring
         * (first == last) can be told apart from a full one.
         */
        ring = dev->kvm->coalesced_mmio_ring;
        avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
        if (avail == 0) {
                /* full */
                return 0;
        }

        return 1;
}
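
/*
 * Worked example for the capacity check above (KVM_COALESCED_MMIO_MAX
 * depends on PAGE_SIZE; the numbers are illustrative): with first == 3
 * and last == 2, avail == (3 - 2 - 1) % KVM_COALESCED_MMIO_MAX == 0 and
 * the ring is full; with first == 2 and last == 0, avail == 1 and one
 * more entry can be queued.  These examples keep first > last so the
 * unsigned subtraction does not wrap; a wrapped intermediate value would
 * not reduce cleanly modulo a ring size that is not a power of two.
 */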

static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
                                struct kvm_io_device *this, gpa_t addr,
                                int len, const void *val)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
        __u32 insert;

        if (!coalesced_mmio_in_range(dev, addr, len))
                return -EOPNOTSUPP;

        spin_lock(&dev->kvm->ring_lock);

        insert = READ_ONCE(ring->last);
        if (!coalesced_mmio_has_room(dev, insert) ||
            insert >= KVM_COALESCED_MMIO_MAX) {
                spin_unlock(&dev->kvm->ring_lock);
                return -EOPNOTSUPP;
        }

        /* copy data into the first free entry of the ring */

        ring->coalesced_mmio[insert].phys_addr = addr;
        ring->coalesced_mmio[insert].len = len;
        memcpy(ring->coalesced_mmio[insert].data, val, len);
        ring->coalesced_mmio[insert].pio = dev->zone.pio;

        /*
         * Ensure the entry is written before ring->last is updated, so a
         * reader that observes the new ring->last also observes the
         * entry's contents.
         */
        smp_wmb();
        ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
        spin_unlock(&dev->kvm->ring_lock);
        return 0;
}
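
/*
 * For reference, a userspace consumer (e.g. a VMM such as QEMU, flushing
 * the ring before it needs up-to-date device state) is expected to drain
 * entries roughly as follows.  This is an illustrative sketch, not part
 * of the kernel; handle_write() stands in for whatever replays the
 * access into the device model:
 *
 *      while (ring->first != ring->last) {
 *              struct kvm_coalesced_mmio *ent =
 *                      &ring->coalesced_mmio[ring->first];
 *              handle_write(ent->phys_addr, ent->data, ent->len, ent->pio);
 *              smp_wmb();
 *              ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *      }
 */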

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

        list_del(&dev->list);

        kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
        .write      = coalesced_mmio_write,
        .destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
        struct page *page;

        page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        if (!page)
                return -ENOMEM;

        kvm->coalesced_mmio_ring = page_address(page);

        /*
         * We're using this spinlock to sync access to the coalesced ring.
         * The list doesn't need its own lock since device registration and
         * unregistration should only happen when kvm->slots_lock is held.
         */
        spin_lock_init(&kvm->ring_lock);
        INIT_LIST_HEAD(&kvm->coalesced_zones);

        return 0;
}
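
/*
 * The page allocated above is the one userspace sees: per the KVM API, a
 * VMM reaches the ring through the vCPU mmap region, at the page offset
 * reported by KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO), i.e.
 * KVM_COALESCED_MMIO_PAGE_OFFSET.
 */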

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
        if (kvm->coalesced_mmio_ring)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
                                         struct kvm_coalesced_mmio_zone *zone)
{
        int ret;
        struct kvm_coalesced_mmio_dev *dev;

        if (zone->pio != 1 && zone->pio != 0)
                return -EINVAL;

        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
                      GFP_KERNEL_ACCOUNT);
        if (!dev)
                return -ENOMEM;

        kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
        dev->kvm = kvm;
        dev->zone = *zone;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm,
                                zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
                                zone->addr, zone->size, &dev->dev);
        if (ret < 0)
                goto out_free_dev;
        list_add_tail(&dev->list, &kvm->coalesced_zones);
        mutex_unlock(&kvm->slots_lock);

        return 0;

out_free_dev:
        mutex_unlock(&kvm->slots_lock);
        kfree(dev);

        return ret;
}
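
/*
 * Illustrative userspace usage (not part of this file): a VMM registers
 * a zone with the KVM_REGISTER_COALESCED_MMIO ioctl on the VM fd and
 * tears it down with KVM_UNREGISTER_COALESCED_MMIO, e.g.:
 *
 *      struct kvm_coalesced_mmio_zone zone = {
 *              .addr = 0xfebf0000,     (hypothetical MMIO BAR)
 *              .size = 0x1000,
 *              .pio  = 0,
 *      };
 *      ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 *      ...
 *      ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 */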

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                                           struct kvm_coalesced_mmio_zone *zone)
{
        struct kvm_coalesced_mmio_dev *dev, *tmp;
        int r;

        if (zone->pio != 1 && zone->pio != 0)
                return -EINVAL;

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
                if (zone->pio == dev->zone.pio &&
                    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
                        r = kvm_io_bus_unregister_dev(kvm,
                                zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);

                        /*
                         * On failure, unregister destroys all devices on the
                         * bus _except_ the target device, i.e. coalesced_zones
                         * has been modified.  No need to restart the walk as
                         * there aren't any zones left.
                         */
                        if (r)
                                break;
                        kvm_iodevice_destructor(&dev->dev);
                }
        }

        mutex_unlock(&kvm->slots_lock);

        /*
         * Ignore the result of kvm_io_bus_unregister_dev(); from userspace's
         * perspective, the coalesced MMIO is most definitely unregistered.
         */
        return 0;
}