/*
 * KVM coalesced MMIO
 *
 * Batches guest MMIO writes to registered guest-physical zones into a ring
 * buffer shared with userspace, so that each write does not force an
 * immediate exit to userspace.
 */
#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

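/*
 * Return 1 if a write of @len bytes at @addr can be coalesced: the ring must
 * have enough free slots and the access must fall entirely inside one of the
 * registered zones.  Return 0 otherwise.
 */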
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	struct kvm_coalesced_mmio_zone *zone;
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;
	int i;

	/*
	 * Is there room to batch this write?  ring->last is the first free
	 * entry; one slot is always left unused so that a full ring can be
	 * told apart from an empty one.  Require at least KVM_MAX_VCPUS free
	 * entries: this check runs before dev->lock is taken, so every vcpu
	 * that passes it must still find a slot.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail < KVM_MAX_VCPUS) {
		/* ring is (almost) full */
		return 0;
	}

	/* Is (addr, len) fully contained in one of the registered zones? */
	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}

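/*
 * kvm_io_device write handler: append the MMIO write to the coalesced ring.
 * Returns -EOPNOTSUPP when the access cannot be coalesced, so the caller
 * treats it as unhandled by this device.  Concurrent writers are serialized
 * by dev->lock.
 */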
static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->lock);

	/* copy the data into the first free entry of the ring */
	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);

	/* make the entry visible before publishing the new 'last' index */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;

	spin_unlock(&dev->lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

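/*
 * Allocate the coalesced MMIO device for @kvm and register it on the MMIO
 * bus.  On failure the device is freed and the error from
 * kvm_io_bus_register_dev() is returned.
 */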
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
	int ret;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;

	ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
	if (ret < 0) {
		kfree(dev);
		return ret;
	}

	/* publish the device only once registration has succeeded, so a
	 * failed init does not leave a dangling pointer behind */
	kvm->coalesced_mmio_dev = dev;

	return 0;
}

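/*
 * KVM_REGISTER_COALESCED_MMIO ioctl handler: record a new guest-physical
 * zone whose MMIO writes should be coalesced.  Fails with -ENOBUFS once
 * KVM_COALESCED_MMIO_ZONE_MAX zones are registered.
 */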
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -EINVAL;

	down_write(&kvm->slots_lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		up_write(&kvm->slots_lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	up_write(&kvm->slots_lock);
	return 0;
}

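/*
 * KVM_UNREGISTER_COALESCED_MMIO ioctl handler: drop every registered zone
 * that is fully contained in (zone->addr, zone->size).
 */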
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/*
		 * Unregister every zone fully included in
		 * (zone->addr, zone->size): overwrite it with the last
		 * registered zone and shrink the array.
		 */
		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	up_write(&kvm->slots_lock);

	return 0;
}