/*
 * Dedicated thread for virtio-blk I/O processing (virtio-blk dataplane)
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/main-loop.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk.h"
#include "virtio-blk.h"
#include "block/aio.h"
#include "hw/virtio/virtio-bus.h"
#include "qom/object_interfaces.h"

struct VirtIOBlockDataPlane {
    bool starting;
    bool stopping;

    VirtIOBlkConf *conf;
    VirtIODevice *vdev;
    QEMUBH *bh;
    unsigned long *batch_notify_vqs;
    bool batch_notifications;

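    /*
     * AioContext that processes this device's virtqueues: the configured
     * IOThread's context if one was given, otherwise the main loop
     * context.  The BH batches guest notifications in that context.
     */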
    IOThread *iothread;
    AioContext *ctx;
};

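/* Raise an interrupt to signal the guest, if necessary */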
void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
{
    if (s->batch_notifications) {
        set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs);
        qemu_bh_schedule(s->bh);
    } else {
        virtio_notify_irqfd(s->vdev, vq);
    }
}

static void notify_guest_bh(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    unsigned nvqs = s->conf->num_queues;
    unsigned long bitmap[BITS_TO_LONGS(nvqs)];
    unsigned j;

    memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap));
    memset(s->batch_notify_vqs, 0, sizeof(bitmap));

    for (j = 0; j < nvqs; j += BITS_PER_LONG) {
        unsigned long bits = bitmap[j / BITS_PER_LONG];

        while (bits != 0) {
            unsigned i = j + ctzl(bits);
            VirtQueue *vq = virtio_get_queue(s->vdev, i);

            virtio_notify_irqfd(s->vdev, vq);

            bits &= bits - 1;
        }
    }
}

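/* Context: QEMU global mutex held */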
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
                                  VirtIOBlockDataPlane **dataplane,
                                  Error **errp)
{
    VirtIOBlockDataPlane *s;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    *dataplane = NULL;

    if (conf->iothread) {
        if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
            error_setg(errp,
                       "device is incompatible with iothread "
                       "(transport does not support notifiers)");
            return false;
        }
        if (!virtio_device_ioeventfd_enabled(vdev)) {
            error_setg(errp, "ioeventfd is required for iothread");
            return false;
        }

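        /*
         * If dataplane is (re-)enabled while the guest is running there
         * could be block jobs that can conflict.
         */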
        if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            error_prepend(errp, "cannot start virtio-blk dataplane: ");
            return false;
        }
    }

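    /* Don't try if the transport does not support notifiers. */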
    if (!virtio_device_ioeventfd_enabled(vdev)) {
        return false;
    }

    s = g_new0(VirtIOBlockDataPlane, 1);
    s->vdev = vdev;
    s->conf = conf;

    if (conf->iothread) {
        s->iothread = conf->iothread;
        object_ref(OBJECT(s->iothread));
        s->ctx = iothread_get_aio_context(s->iothread);
    } else {
        s->ctx = qemu_get_aio_context();
    }
    s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
    s->batch_notify_vqs = bitmap_new(conf->num_queues);

    *dataplane = s;

    return true;
}

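/* Context: QEMU global mutex held */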
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
    VirtIOBlock *vblk;

    if (!s) {
        return;
    }

    vblk = VIRTIO_BLK(s->vdev);
    assert(!vblk->dataplane_started);
    g_free(s->batch_notify_vqs);
    qemu_bh_delete(s->bh);
    if (s->iothread) {
        object_unref(OBJECT(s->iothread));
    }
    g_free(s);
}

static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
                                                VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    assert(s->dataplane);
    assert(s->dataplane_started);

    return virtio_blk_handle_vq(s, vq);
}

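/* Context: QEMU global mutex held */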
int virtio_blk_data_plane_start(VirtIODevice *vdev)
{
    VirtIOBlock *vblk = VIRTIO_BLK(vdev);
    VirtIOBlockDataPlane *s = vblk->dataplane;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    AioContext *old_context;
    unsigned i;
    unsigned nvqs = s->conf->num_queues;
    Error *local_err = NULL;
    int r;

    if (vblk->dataplane_started || s->starting) {
        return 0;
    }

    s->starting = true;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        s->batch_notifications = true;
    } else {
        s->batch_notifications = false;
    }

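    /* Set up guest notifier (irq) */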
    r = k->set_guest_notifiers(qbus->parent, nvqs, true);
    if (r != 0) {
        error_report("virtio-blk failed to set guest notifier (%d), "
                     "ensure -accel kvm is set.", r);
        goto fail_guest_notifiers;
    }

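    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */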
    memory_region_transaction_begin();

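    /* Set up virtqueue notify */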
    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
        if (r != 0) {
            int j = i;

            fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
            while (i--) {
                virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
            }

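            /*
             * The transaction expects the ioeventfds to be open when it
             * commits.  Do it now, before the cleanup loop.
             */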
            memory_region_transaction_commit();

            while (j--) {
                virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j);
            }
            goto fail_host_notifiers;
        }
    }

    memory_region_transaction_commit();

    s->starting = false;
    vblk->dataplane_started = true;
    trace_virtio_blk_data_plane_start(s);

    old_context = blk_get_aio_context(s->conf->conf.blk);
    aio_context_acquire(old_context);
    r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err);
    aio_context_release(old_context);
    if (r < 0) {
        error_report_err(local_err);
        goto fail_aio_context;
    }

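    /* Process queued requests before the ones in the vring */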
    virtio_blk_process_queued_requests(vblk, false);

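    /* Kick right away to begin processing requests already in the vring */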
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        event_notifier_set(virtio_queue_get_host_notifier(vq));
    }

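    /* Hook up the virtqueue handlers in the dataplane AioContext */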
    aio_context_acquire(s->ctx);
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx,
                virtio_blk_data_plane_handle_output);
    }
    aio_context_release(s->ctx);
    return 0;

  fail_aio_context:
    memory_region_transaction_begin();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    memory_region_transaction_commit();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }
  fail_host_notifiers:
    k->set_guest_notifiers(qbus->parent, nvqs, false);
  fail_guest_notifiers:
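    /*
     * If we failed to set up the guest notifiers, queued requests will be
     * processed on the main context instead.
     */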
    virtio_blk_process_queued_requests(vblk, false);
    vblk->dataplane_disabled = true;
    s->starting = false;
    vblk->dataplane_started = true;
    return -ENOSYS;
}

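/*
 * Detach the virtqueue host notifier handlers; runs in the dataplane
 * AioContext via aio_wait_bh_oneshot().
 */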
static void virtio_blk_data_plane_stop_bh(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    unsigned i;

    for (i = 0; i < s->conf->num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
    }
}

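/* Context: QEMU global mutex held */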
void virtio_blk_data_plane_stop(VirtIODevice *vdev)
{
    VirtIOBlock *vblk = VIRTIO_BLK(vdev);
    VirtIOBlockDataPlane *s = vblk->dataplane;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vblk));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf->num_queues;

    if (!vblk->dataplane_started || s->stopping) {
        return;
    }

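    /* Dataplane was disabled at start time; just clear the flags. */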
    if (vblk->dataplane_disabled) {
        vblk->dataplane_disabled = false;
        vblk->dataplane_started = false;
        return;
    }
    s->stopping = true;
    trace_virtio_blk_data_plane_stop(s);

    aio_context_acquire(s->ctx);
    aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);

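    /*
     * Drain and try to switch the BlockBackend back to the QEMU main loop.
     * If other users keep it in the IOThread, that's fine.
     */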
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);

    aio_context_release(s->ctx);

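    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */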
    memory_region_transaction_begin();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

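    /*
     * The transaction expects the ioeventfds to be open when it
     * commits.  Do it now, before the cleanup loop.
     */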
    memory_region_transaction_commit();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }

    qemu_bh_cancel(s->bh);
    notify_guest_bh(s); /* final chance to notify guest */

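    /* Clean up guest notifier (irq) */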
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    vblk->dataplane_started = false;
    s->stopping = false;
}