/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest.  If we get more outstanding requests at a time
 *      than this we will get EAGAIN from io_submit which is communicated to
 *      the guest as an I/O error.
 */
#define MAX_EVENTS 128

struct qemu_laiocb {
    BlockAIOCB common;
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

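/*
 * LaioQueue batches incoming requests: while "plugged" (laio_io_plug()
 * nesting count > 0) new iocbs are only appended to "pending" and are
 * pushed to the kernel in one io_submit() call on unplug.  "blocked" is
 * set when io_submit() returned EAGAIN, i.e. submission must wait for
 * completions to free ring slots.
 */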
typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* io queue for submit at batch.  Protected by AioContext lock. */
    LaioQueue io_q;

    /* I/O completion processing.  Only runs in I/O thread.  */
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);

/* The completion status is split across res2 (high 32 bits) and res
 * (low 32 bits); recombine it into a signed ssize_t. */
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;
    if (laiocb->co) {
        /* If the coroutine is already entered it must be in ioq_submit()
         * and will notice laiocb->ret has been filled in when it
         * eventually runs later.  Coroutines cannot be entered recursively
         * so avoid doing that!
         */
        if (!qemu_coroutine_entered(laiocb->co)) {
            aio_co_wake(laiocb->co);
        }
    } else {
        laiocb->common.cb(laiocb->common.opaque, ret);
        qemu_aio_unref(laiocb);
    }
}

/**
 * aio_ring buffer which is shared between userspace and kernel.
 *
 * This data is accessed by the kernel from the io_getevents syscall and
 * therefore must not be changed.
 */
struct aio_ring {
    unsigned id;    /* kernel internal index number */
    unsigned nr;    /* number of io_events */
    unsigned head;  /* written by userspace to consume events */
    unsigned tail;  /* written by the kernel as events complete */

    unsigned magic;
    unsigned compat_features;
    unsigned incompat_features;
    unsigned header_length;  /* size of aio_ring */

    struct io_event io_events[0];
};

/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer on events array, output value
 *
 * Returns the number of completed events and sets a pointer
 * on events array.  This function does not update the internal
 * ring buffer, only reads head and tail.  When @events has been
 * processed io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;

    /* To avoid speculative loads of the event buffer before observing
     * tail.  Paired with the kernel's smp_wmb() when it fills events. */
    smp_rmb();

    return nr;
}

/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events on which head should be advanced
 *
 * Advances head of a ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}

/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer on events array, output value
 * @nr: the number of events on which head should be advanced
 *
 * Advances head of a ring buffer and returns the number of pending
 * events.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}
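
/*
 * Typical use of the peek/commit pair above, for reference:
 *
 *     struct io_event *ev;
 *     unsigned int n = io_getevents_peek(ctx, &ev);
 *     ...process ev[0..n-1]...
 *     io_getevents_commit(ctx, n);
 *
 * qemu_laio_process_completions() below fuses both steps through
 * io_getevents_advance_and_peek().
 */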

/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes the appropriate callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops,
 * for example when a request callback invokes aio_poll().  In order to do
 * this, indices are kept in LinuxAioState.  The function schedules BH
 * completion so it can be called again in a nested event loop.  When
 * there are no events left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested we have to notify the level above that we are done
     * by setting event_max to zero, upper level will then jump out of its
     * own `for` loop.  If we are the last, all counters drop to zero. */
    s->event_max = 0;
    s->event_idx = 0;
}

static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    qemu_laio_process_completions(s);

    aio_context_acquire(s->aio_context);
    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
    aio_context_release(s->aio_context);
}

static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}

static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}
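
/* Polling-mode counterpart of qemu_laio_completion_cb(): the event loop
 * invokes this while busy-waiting; peeking the completion ring directly
 * avoids reading the eventfd.  Returns true iff progress was made. */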

static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    if (!io_getevents_peek(s->ctx, &events)) {
        return false;
    }

    qemu_laio_process_completions_and_submit(s);
    return true;
}

static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled, cb will be called by the event loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}

static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size   = sizeof(struct qemu_laiocb),
    .cancel_async = laio_cancel,
};

static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}
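
/*
 * Feed the kernel as many pending iocbs as the ring has room for, at most
 * MAX_EVENTS minus the requests already in flight per io_submit() call.
 * A short submission (ret < len) leaves the remainder queued and marks
 * the queue blocked until completions free slots.
 */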

static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* We can try to complete something just right away if there are
         * still requests in flight. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the
         * queue may still hold pending requests (in_queue > 0).  We do
         * not retry submission here to avoid an I/O hang: s->e is still
         * set, so the completion callback will run shortly and submit
         * the pending requests from there.
         */
    }
}
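
/*
 * laio_io_plug()/laio_io_unplug() bracket a batch of submissions: while
 * the plug count is non-zero, laio_do_submit() only queues requests, and
 * the final unplug flushes them in one go (unless the queue fills up to
 * MAX_EVENTS first, which forces an early submission).
 */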

void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}

void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
        ioq_submit(s);
    }

    return 0;
}
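
/*
 * Coroutine interface: the ACB lives on the caller's stack and the
 * coroutine yields until qemu_laio_process_completion() wakes it.  If the
 * request already completed during laio_do_submit() (via a nested
 * completion run), laiocb.ret is no longer -EINPROGRESS and no yield
 * happens.
 */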

int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co      = qemu_coroutine_self(),
        .nbytes  = qiov->size,
        .ctx     = s,
        .ret     = -EINPROGRESS,
        .is_read = (type == QEMU_AIO_READ),
        .qiov    = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}

BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
                        BlockCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laiocb *laiocb;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    int ret;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * BDRV_SECTOR_SIZE;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    ret = laio_do_submit(fd, laiocb, offset, type);
    if (ret < 0) {
        qemu_aio_unref(laiocb);
        return NULL;
    }

    return &laiocb->common;
}
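
/*
 * A LinuxAioState is registered with at most one AioContext at a time;
 * detach from the old context before attaching to a new one so that the
 * event notifier and the completion BH move together.
 */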

void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb);
}
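
/*
 * Lifecycle sketch (assuming a caller-owned AioContext *ctx):
 *
 *     LinuxAioState *aio = laio_init();
 *     if (aio) {
 *         laio_attach_aio_context(aio, ctx);
 *         ...submit I/O with laio_co_submit() or laio_submit()...
 *         laio_detach_aio_context(aio, ctx);
 *         laio_cleanup(aio);
 *     }
 */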

LinuxAioState *laio_init(void)
{
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}

void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                __func__, &s->ctx);
    }
    g_free(s);
}