/*
 * Block node draining tests
 */

25#include "qemu/osdep.h"
26#include "block/block.h"
27#include "block/blockjob_int.h"
28#include "sysemu/block-backend.h"
29#include "qapi/error.h"
30#include "qemu/main-loop.h"
31#include "iothread.h"
32
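/* Set by callbacks running in other threads/AioContexts to tell the test's
 * main thread that their work has finished */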
33static QemuEvent done_event;
34
35typedef struct BDRVTestState {
36 int drain_count;
37 AioContext *bh_indirection_ctx;
38 bool sleep_in_drain_begin;
39} BDRVTestState;
40
41static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
42{
43 BDRVTestState *s = bs->opaque;
44 s->drain_count++;
45 if (s->sleep_in_drain_begin) {
46 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
47 }
48}
49
50static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs)
51{
52 BDRVTestState *s = bs->opaque;
53 s->drain_count--;
54}
55
56static void bdrv_test_close(BlockDriverState *bs)
57{
58 BDRVTestState *s = bs->opaque;
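    /* bdrv_close() must only run while the node is drained */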
59 g_assert_cmpint(s->drain_count, >, 0);
60}
61
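/* BH that re-enters a request coroutine from another AioContext */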
62static void co_reenter_bh(void *opaque)
63{
64 aio_co_wake(opaque);
65}
66
67static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
68 int64_t offset, int64_t bytes,
69 QEMUIOVector *qiov,
70 BdrvRequestFlags flags)
71{
72 BDRVTestState *s = bs->opaque;
73
    /*
     * Keep the request in flight long enough that the polling loop in
     * drain actually has to wait for it.
     */
78 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
79
80 if (s->bh_indirection_ctx) {
81 aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
82 qemu_coroutine_self());
83 qemu_coroutine_yield();
84 }
85
86 return 0;
87}
88
89static int bdrv_test_change_backing_file(BlockDriverState *bs,
90 const char *backing_file,
91 const char *backing_fmt)
92{
93 return 0;
94}
95
96static BlockDriver bdrv_test = {
97 .format_name = "test",
98 .instance_size = sizeof(BDRVTestState),
99 .supports_backing = true,
100
101 .bdrv_close = bdrv_test_close,
102 .bdrv_co_preadv = bdrv_test_co_preadv,
103
104 .bdrv_co_drain_begin = bdrv_test_co_drain_begin,
105 .bdrv_co_drain_end = bdrv_test_co_drain_end,
106
107 .bdrv_child_perm = bdrv_default_perms,
108
109 .bdrv_change_backing_file = bdrv_test_change_backing_file,
110};
111
112static void aio_ret_cb(void *opaque, int ret)
113{
114 int *aio_ret = opaque;
115 *aio_ret = ret;
116}
117
118typedef struct CallInCoroutineData {
119 void (*entry)(void);
120 bool done;
121} CallInCoroutineData;
122
123static coroutine_fn void call_in_coroutine_entry(void *opaque)
124{
125 CallInCoroutineData *data = opaque;
126
127 data->entry();
128 data->done = true;
129}
130
131static void call_in_coroutine(void (*entry)(void))
132{
133 Coroutine *co;
134 CallInCoroutineData data = {
135 .entry = entry,
136 .done = false,
137 };
138
139 co = qemu_coroutine_create(call_in_coroutine_entry, &data);
140 qemu_coroutine_enter(co);
141 while (!data.done) {
142 aio_poll(qemu_get_aio_context(), true);
143 }
144}
145
146enum drain_type {
147 BDRV_DRAIN_ALL,
148 BDRV_DRAIN,
149 BDRV_SUBTREE_DRAIN,
150 DRAIN_TYPE_MAX,
151};
152
153static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
154{
155 switch (drain_type) {
156 case BDRV_DRAIN_ALL: bdrv_drain_all_begin(); break;
157 case BDRV_DRAIN: bdrv_drained_begin(bs); break;
158 case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_begin(bs); break;
159 default: g_assert_not_reached();
160 }
161}
162
163static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
164{
165 switch (drain_type) {
166 case BDRV_DRAIN_ALL: bdrv_drain_all_end(); break;
167 case BDRV_DRAIN: bdrv_drained_end(bs); break;
168 case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_end(bs); break;
169 default: g_assert_not_reached();
170 }
171}
172
173static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
174{
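    /*
     * The single-node drain variants expect the caller to hold the node's
     * AioContext lock; bdrv_drain_all_begin()/end() manage the locks
     * themselves.
     */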
175 if (drain_type != BDRV_DRAIN_ALL) {
176 aio_context_acquire(bdrv_get_aio_context(bs));
177 }
178 do_drain_begin(drain_type, bs);
179 if (drain_type != BDRV_DRAIN_ALL) {
180 aio_context_release(bdrv_get_aio_context(bs));
181 }
182}
183
184static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
185{
186 if (drain_type != BDRV_DRAIN_ALL) {
187 aio_context_acquire(bdrv_get_aio_context(bs));
188 }
189 do_drain_end(drain_type, bs);
190 if (drain_type != BDRV_DRAIN_ALL) {
191 aio_context_release(bdrv_get_aio_context(bs));
192 }
193}
194
195static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
196{
197 BlockBackend *blk;
198 BlockDriverState *bs, *backing;
199 BDRVTestState *s, *backing_s;
200 BlockAIOCB *acb;
201 int aio_ret;
202
203 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
204
205 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
206 bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
207 &error_abort);
208 s = bs->opaque;
209 blk_insert_bs(blk, bs, &error_abort);
210
211 backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
212 backing_s = backing->opaque;
213 bdrv_set_backing_hd(bs, backing, &error_abort);
214
    /* Simple drain begin/end pair; the driver callbacks must be invoked */
216 g_assert_cmpint(s->drain_count, ==, 0);
217 g_assert_cmpint(backing_s->drain_count, ==, 0);
218
219 do_drain_begin(drain_type, bs);
220
221 g_assert_cmpint(s->drain_count, ==, 1);
222 g_assert_cmpint(backing_s->drain_count, ==, !!recursive);
223
224 do_drain_end(drain_type, bs);
225
226 g_assert_cmpint(s->drain_count, ==, 0);
227 g_assert_cmpint(backing_s->drain_count, ==, 0);
228
    /* Now do the same while a request is in flight */
230 aio_ret = -EINPROGRESS;
231 acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
232 g_assert(acb != NULL);
233 g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
234
235 g_assert_cmpint(s->drain_count, ==, 0);
236 g_assert_cmpint(backing_s->drain_count, ==, 0);
237
238 do_drain_begin(drain_type, bs);
239
240 g_assert_cmpint(aio_ret, ==, 0);
241 g_assert_cmpint(s->drain_count, ==, 1);
242 g_assert_cmpint(backing_s->drain_count, ==, !!recursive);
243
244 do_drain_end(drain_type, bs);
245
246 g_assert_cmpint(s->drain_count, ==, 0);
247 g_assert_cmpint(backing_s->drain_count, ==, 0);
248
249 bdrv_unref(backing);
250 bdrv_unref(bs);
251 blk_unref(blk);
252}
253
254static void test_drv_cb_drain_all(void)
255{
256 test_drv_cb_common(BDRV_DRAIN_ALL, true);
257}
258
259static void test_drv_cb_drain(void)
260{
261 test_drv_cb_common(BDRV_DRAIN, false);
262}
263
264static void test_drv_cb_drain_subtree(void)
265{
266 test_drv_cb_common(BDRV_SUBTREE_DRAIN, true);
267}
268
269static void test_drv_cb_co_drain_all(void)
270{
271 call_in_coroutine(test_drv_cb_drain_all);
272}
273
274static void test_drv_cb_co_drain(void)
275{
276 call_in_coroutine(test_drv_cb_drain);
277}
278
279static void test_drv_cb_co_drain_subtree(void)
280{
281 call_in_coroutine(test_drv_cb_drain_subtree);
282}
283
284static void test_quiesce_common(enum drain_type drain_type, bool recursive)
285{
286 BlockBackend *blk;
287 BlockDriverState *bs, *backing;
288
289 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
290 bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
291 &error_abort);
292 blk_insert_bs(blk, bs, &error_abort);
293
294 backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
295 bdrv_set_backing_hd(bs, backing, &error_abort);
296
297 g_assert_cmpint(bs->quiesce_counter, ==, 0);
298 g_assert_cmpint(backing->quiesce_counter, ==, 0);
299
300 do_drain_begin(drain_type, bs);
301
302 g_assert_cmpint(bs->quiesce_counter, ==, 1);
303 g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);
304
305 do_drain_end(drain_type, bs);
306
307 g_assert_cmpint(bs->quiesce_counter, ==, 0);
308 g_assert_cmpint(backing->quiesce_counter, ==, 0);
309
310 bdrv_unref(backing);
311 bdrv_unref(bs);
312 blk_unref(blk);
313}
314
315static void test_quiesce_drain_all(void)
316{
317 test_quiesce_common(BDRV_DRAIN_ALL, true);
318}
319
320static void test_quiesce_drain(void)
321{
322 test_quiesce_common(BDRV_DRAIN, false);
323}
324
325static void test_quiesce_drain_subtree(void)
326{
327 test_quiesce_common(BDRV_SUBTREE_DRAIN, true);
328}
329
330static void test_quiesce_co_drain_all(void)
331{
332 call_in_coroutine(test_quiesce_drain_all);
333}
334
335static void test_quiesce_co_drain(void)
336{
337 call_in_coroutine(test_quiesce_drain);
338}
339
340static void test_quiesce_co_drain_subtree(void)
341{
342 call_in_coroutine(test_quiesce_drain_subtree);
343}
344
345static void test_nested(void)
346{
347 BlockBackend *blk;
348 BlockDriverState *bs, *backing;
349 BDRVTestState *s, *backing_s;
350 enum drain_type outer, inner;
351
352 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
353 bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
354 &error_abort);
355 s = bs->opaque;
356 blk_insert_bs(blk, bs, &error_abort);
357
358 backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
359 backing_s = backing->opaque;
360 bdrv_set_backing_hd(bs, backing, &error_abort);
361
362 for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
363 for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
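            /* bdrv_drain() does not drain the backing file; the other two
             * drain types do */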
364 int backing_quiesce = (outer != BDRV_DRAIN) +
365 (inner != BDRV_DRAIN);
366
367 g_assert_cmpint(bs->quiesce_counter, ==, 0);
368 g_assert_cmpint(backing->quiesce_counter, ==, 0);
369 g_assert_cmpint(s->drain_count, ==, 0);
370 g_assert_cmpint(backing_s->drain_count, ==, 0);
371
372 do_drain_begin(outer, bs);
373 do_drain_begin(inner, bs);
374
375 g_assert_cmpint(bs->quiesce_counter, ==, 2);
376 g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
377 g_assert_cmpint(s->drain_count, ==, 2);
378 g_assert_cmpint(backing_s->drain_count, ==, backing_quiesce);
379
380 do_drain_end(inner, bs);
381 do_drain_end(outer, bs);
382
383 g_assert_cmpint(bs->quiesce_counter, ==, 0);
384 g_assert_cmpint(backing->quiesce_counter, ==, 0);
385 g_assert_cmpint(s->drain_count, ==, 0);
386 g_assert_cmpint(backing_s->drain_count, ==, 0);
387 }
388 }
389
390 bdrv_unref(backing);
391 bdrv_unref(bs);
392 blk_unref(blk);
393}
394
395static void test_multiparent(void)
396{
397 BlockBackend *blk_a, *blk_b;
398 BlockDriverState *bs_a, *bs_b, *backing;
399 BDRVTestState *a_s, *b_s, *backing_s;
400
401 blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
402 bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
403 &error_abort);
404 a_s = bs_a->opaque;
405 blk_insert_bs(blk_a, bs_a, &error_abort);
406
407 blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
408 bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
409 &error_abort);
410 b_s = bs_b->opaque;
411 blk_insert_bs(blk_b, bs_b, &error_abort);
412
413 backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
414 backing_s = backing->opaque;
415 bdrv_set_backing_hd(bs_a, backing, &error_abort);
416 bdrv_set_backing_hd(bs_b, backing, &error_abort);
417
418 g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
419 g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
420 g_assert_cmpint(backing->quiesce_counter, ==, 0);
421 g_assert_cmpint(a_s->drain_count, ==, 0);
422 g_assert_cmpint(b_s->drain_count, ==, 0);
423 g_assert_cmpint(backing_s->drain_count, ==, 0);
424
425 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
426
427 g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
428 g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
429 g_assert_cmpint(backing->quiesce_counter, ==, 1);
430 g_assert_cmpint(a_s->drain_count, ==, 1);
431 g_assert_cmpint(b_s->drain_count, ==, 1);
432 g_assert_cmpint(backing_s->drain_count, ==, 1);
433
434 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
435
436 g_assert_cmpint(bs_a->quiesce_counter, ==, 2);
437 g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
438 g_assert_cmpint(backing->quiesce_counter, ==, 2);
439 g_assert_cmpint(a_s->drain_count, ==, 2);
440 g_assert_cmpint(b_s->drain_count, ==, 2);
441 g_assert_cmpint(backing_s->drain_count, ==, 2);
442
443 do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
444
445 g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
446 g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
447 g_assert_cmpint(backing->quiesce_counter, ==, 1);
448 g_assert_cmpint(a_s->drain_count, ==, 1);
449 g_assert_cmpint(b_s->drain_count, ==, 1);
450 g_assert_cmpint(backing_s->drain_count, ==, 1);
451
452 do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
453
454 g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
455 g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
456 g_assert_cmpint(backing->quiesce_counter, ==, 0);
457 g_assert_cmpint(a_s->drain_count, ==, 0);
458 g_assert_cmpint(b_s->drain_count, ==, 0);
459 g_assert_cmpint(backing_s->drain_count, ==, 0);
460
461 bdrv_unref(backing);
462 bdrv_unref(bs_a);
463 bdrv_unref(bs_b);
464 blk_unref(blk_a);
465 blk_unref(blk_b);
466}
467
468static void test_graph_change_drain_subtree(void)
469{
470 BlockBackend *blk_a, *blk_b;
471 BlockDriverState *bs_a, *bs_b, *backing;
472 BDRVTestState *a_s, *b_s, *backing_s;
473
474 blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
475 bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
476 &error_abort);
477 a_s = bs_a->opaque;
478 blk_insert_bs(blk_a, bs_a, &error_abort);
479
480 blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
481 bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
482 &error_abort);
483 b_s = bs_b->opaque;
484 blk_insert_bs(blk_b, bs_b, &error_abort);
485
486 backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
487 backing_s = backing->opaque;
488 bdrv_set_backing_hd(bs_a, backing, &error_abort);
489
490 g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
491 g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
492 g_assert_cmpint(backing->quiesce_counter, ==, 0);
493 g_assert_cmpint(a_s->drain_count, ==, 0);
494 g_assert_cmpint(b_s->drain_count, ==, 0);
495 g_assert_cmpint(backing_s->drain_count, ==, 0);
496
497 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
498 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
499 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
500 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
501 do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
502
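    /*
     * Attaching and detaching the backing file inside the drained sections
     * must keep the drain counts of all involved nodes consistent.
     */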
503 bdrv_set_backing_hd(bs_b, backing, &error_abort);
504 g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
505 g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
506 g_assert_cmpint(backing->quiesce_counter, ==, 5);
507 g_assert_cmpint(a_s->drain_count, ==, 5);
508 g_assert_cmpint(b_s->drain_count, ==, 5);
509 g_assert_cmpint(backing_s->drain_count, ==, 5);
510
511 bdrv_set_backing_hd(bs_b, NULL, &error_abort);
512 g_assert_cmpint(bs_a->quiesce_counter, ==, 3);
513 g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
514 g_assert_cmpint(backing->quiesce_counter, ==, 3);
515 g_assert_cmpint(a_s->drain_count, ==, 3);
516 g_assert_cmpint(b_s->drain_count, ==, 2);
517 g_assert_cmpint(backing_s->drain_count, ==, 3);
518
519 bdrv_set_backing_hd(bs_b, backing, &error_abort);
520 g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
521 g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
522 g_assert_cmpint(backing->quiesce_counter, ==, 5);
523 g_assert_cmpint(a_s->drain_count, ==, 5);
524 g_assert_cmpint(b_s->drain_count, ==, 5);
525 g_assert_cmpint(backing_s->drain_count, ==, 5);
526
527 do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
528 do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
529 do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
530 do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
531 do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
532
533 g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
534 g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
535 g_assert_cmpint(backing->quiesce_counter, ==, 0);
536 g_assert_cmpint(a_s->drain_count, ==, 0);
537 g_assert_cmpint(b_s->drain_count, ==, 0);
538 g_assert_cmpint(backing_s->drain_count, ==, 0);
539
540 bdrv_unref(backing);
541 bdrv_unref(bs_a);
542 bdrv_unref(bs_b);
543 blk_unref(blk_a);
544 blk_unref(blk_b);
545}
546
547static void test_graph_change_drain_all(void)
548{
549 BlockBackend *blk_a, *blk_b;
550 BlockDriverState *bs_a, *bs_b;
551 BDRVTestState *a_s, *b_s;
552
    /* Create node A with a BlockBackend */
554 blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
555 bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
556 &error_abort);
557 a_s = bs_a->opaque;
558 blk_insert_bs(blk_a, bs_a, &error_abort);
559
560 g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
561 g_assert_cmpint(a_s->drain_count, ==, 0);
562
    /* Call bdrv_drain_all_begin() while only node A exists */
564 bdrv_drain_all_begin();
565
566 g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
567 g_assert_cmpint(a_s->drain_count, ==, 1);
568
    /* Create node B while the drain_all section is still active */
570 blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
571 bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
572 &error_abort);
573 b_s = bs_b->opaque;
574 blk_insert_bs(blk_b, bs_b, &error_abort);
575
576 g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
577 g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
578 g_assert_cmpint(a_s->drain_count, ==, 1);
579 g_assert_cmpint(b_s->drain_count, ==, 1);
580
    /* Unref and finally delete node A while still drained */
582 blk_unref(blk_a);
583
584 g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
585 g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
586 g_assert_cmpint(a_s->drain_count, ==, 1);
587 g_assert_cmpint(b_s->drain_count, ==, 1);
588
589 bdrv_unref(bs_a);
590
591 g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
592 g_assert_cmpint(b_s->drain_count, ==, 1);
593
    /* End the drained section */
595 bdrv_drain_all_end();
596
597 g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
598 g_assert_cmpint(b_s->drain_count, ==, 0);
599 g_assert_cmpint(qemu_get_aio_context()->external_disable_cnt, ==, 0);
600
601 bdrv_unref(bs_b);
602 blk_unref(blk_b);
603}
604
605struct test_iothread_data {
606 BlockDriverState *bs;
607 enum drain_type drain_type;
608 int *aio_ret;
609};
610
611static void test_iothread_drain_entry(void *opaque)
612{
613 struct test_iothread_data *data = opaque;
614
615 aio_context_acquire(bdrv_get_aio_context(data->bs));
616 do_drain_begin(data->drain_type, data->bs);
617 g_assert_cmpint(*data->aio_ret, ==, 0);
618 do_drain_end(data->drain_type, data->bs);
619 aio_context_release(bdrv_get_aio_context(data->bs));
620
621 qemu_event_set(&done_event);
622}
623
624static void test_iothread_aio_cb(void *opaque, int ret)
625{
626 int *aio_ret = opaque;
627 *aio_ret = ret;
628 qemu_event_set(&done_event);
629}
630
631static void test_iothread_main_thread_bh(void *opaque)
632{
633 struct test_iothread_data *data = opaque;
634
    /*
     * A random BH that runs during the drained section; it must still be
     * able to acquire the AioContext and flush without deadlocking.
     */
637 aio_context_acquire(bdrv_get_aio_context(data->bs));
638 bdrv_flush(data->bs);
639 aio_context_release(bdrv_get_aio_context(data->bs));
640}
641
/*
 * Starts an AIO request on a BDS that runs in the AioContext of iothread a.
 * The request involves a BH on iothread b before it can complete.
 *
 * @drain_thread = 0 means that the drain is performed from the main thread;
 * @drain_thread = 1 means that it is performed in iothread a, the thread
 * that hosts the node's AioContext.
 */
651static void test_iothread_common(enum drain_type drain_type, int drain_thread)
652{
653 BlockBackend *blk;
654 BlockDriverState *bs;
655 BDRVTestState *s;
656 BlockAIOCB *acb;
657 int aio_ret;
658 struct test_iothread_data data;
659
660 IOThread *a = iothread_new();
661 IOThread *b = iothread_new();
662 AioContext *ctx_a = iothread_get_aio_context(a);
663 AioContext *ctx_b = iothread_get_aio_context(b);
664
665 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
666
    /* bdrv_drain_all() may only be called from the main loop thread */
668 if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
669 goto out;
670 }
671
672 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
673 bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
674 &error_abort);
675 s = bs->opaque;
676 blk_insert_bs(blk, bs, &error_abort);
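    /* Submit requests directly even while the BlockBackend is drained,
     * instead of queuing them */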
677 blk_set_disable_request_queuing(blk, true);
678
679 blk_set_aio_context(blk, ctx_a, &error_abort);
680 aio_context_acquire(ctx_a);
681
682 s->bh_indirection_ctx = ctx_b;
683
684 aio_ret = -EINPROGRESS;
685 qemu_event_reset(&done_event);
686
687 if (drain_thread == 0) {
688 acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
689 } else {
690 acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
691 }
692 g_assert(acb != NULL);
693 g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
694
695 aio_context_release(ctx_a);
696
697 data = (struct test_iothread_data) {
698 .bs = bs,
699 .drain_type = drain_type,
700 .aio_ret = &aio_ret,
701 };
702
703 switch (drain_thread) {
704 case 0:
705 if (drain_type != BDRV_DRAIN_ALL) {
706 aio_context_acquire(ctx_a);
707 }
708
709 aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);
710
        /*
         * The request is running in iothread a. Draining its node makes
         * sure that it has completed as far as the BDS is concerned, but
         * the completion callback may still run slightly later, which is
         * why aio_ret is only checked after waiting for done_event below.
         */
716 do_drain_begin(drain_type, bs);
717 g_assert_cmpint(bs->in_flight, ==, 0);
718
719 if (drain_type != BDRV_DRAIN_ALL) {
720 aio_context_release(ctx_a);
721 }
722 qemu_event_wait(&done_event);
723 if (drain_type != BDRV_DRAIN_ALL) {
724 aio_context_acquire(ctx_a);
725 }
726
727 g_assert_cmpint(aio_ret, ==, 0);
728 do_drain_end(drain_type, bs);
729
730 if (drain_type != BDRV_DRAIN_ALL) {
731 aio_context_release(ctx_a);
732 }
733 break;
734 case 1:
735 aio_bh_schedule_oneshot(ctx_a, test_iothread_drain_entry, &data);
736 qemu_event_wait(&done_event);
737 break;
738 default:
739 g_assert_not_reached();
740 }
741
742 aio_context_acquire(ctx_a);
743 blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
744 aio_context_release(ctx_a);
745
746 bdrv_unref(bs);
747 blk_unref(blk);
748
749out:
750 iothread_join(a);
751 iothread_join(b);
752}
753
754static void test_iothread_drain_all(void)
755{
756 test_iothread_common(BDRV_DRAIN_ALL, 0);
757 test_iothread_common(BDRV_DRAIN_ALL, 1);
758}
759
760static void test_iothread_drain(void)
761{
762 test_iothread_common(BDRV_DRAIN, 0);
763 test_iothread_common(BDRV_DRAIN, 1);
764}
765
766static void test_iothread_drain_subtree(void)
767{
768 test_iothread_common(BDRV_SUBTREE_DRAIN, 0);
769 test_iothread_common(BDRV_SUBTREE_DRAIN, 1);
770}
771
772
773typedef struct TestBlockJob {
774 BlockJob common;
775 int run_ret;
776 int prepare_ret;
777 bool running;
778 bool should_complete;
779} TestBlockJob;
780
781static int test_job_prepare(Job *job)
782{
783 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
784
    /* Flushing the job's BlockBackend polls; it must not deadlock in .prepare */
786 blk_flush(s->common.blk);
787 return s->prepare_ret;
788}
789
790static void test_job_commit(Job *job)
791{
792 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
793
    /* Flushing the job's BlockBackend polls; it must not deadlock in .commit */
795 blk_flush(s->common.blk);
796}
797
798static void test_job_abort(Job *job)
799{
800 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
801
    /* Flushing the job's BlockBackend polls; it must not deadlock in .abort */
803 blk_flush(s->common.blk);
804}
805
806static int coroutine_fn test_job_run(Job *job, Error **errp)
807{
808 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
809
    /*
     * We are in the actual job coroutine now, past the initial pause point
     * in job_co_entry().
     */
812 s->running = true;
813
814 job_transition_to_ready(&s->common.job);
815 while (!s->should_complete) {
        /*
         * Avoid job_sleep_ns() because it marks the job as !busy. We want
         * to emulate some actual activity (probably some I/O) here so that
         * drain has to wait for this activity to stop.
         */
819 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);
820
821 job_pause_point(&s->common.job);
822 }
823
824 return s->run_ret;
825}
826
827static void test_job_complete(Job *job, Error **errp)
828{
829 TestBlockJob *s = container_of(job, TestBlockJob, common.job);
830 s->should_complete = true;
831}
832
833BlockJobDriver test_job_driver = {
834 .job_driver = {
835 .instance_size = sizeof(TestBlockJob),
836 .free = block_job_free,
837 .user_resume = block_job_user_resume,
838 .run = test_job_run,
839 .complete = test_job_complete,
840 .prepare = test_job_prepare,
841 .commit = test_job_commit,
842 .abort = test_job_abort,
843 },
844};
845
846enum test_job_result {
847 TEST_JOB_SUCCESS,
848 TEST_JOB_FAIL_RUN,
849 TEST_JOB_FAIL_PREPARE,
850};
851
852enum test_job_drain_node {
853 TEST_JOB_DRAIN_SRC,
854 TEST_JOB_DRAIN_SRC_CHILD,
855 TEST_JOB_DRAIN_SRC_PARENT,
856};
857
858static void test_blockjob_common_drain_node(enum drain_type drain_type,
859 bool use_iothread,
860 enum test_job_result result,
861 enum test_job_drain_node drain_node)
862{
863 BlockBackend *blk_src, *blk_target;
864 BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
865 BlockJob *job;
866 TestBlockJob *tjob;
867 IOThread *iothread = NULL;
868 AioContext *ctx;
869 int ret;
870
871 src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
872 &error_abort);
873 src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
874 BDRV_O_RDWR, &error_abort);
875 src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
876 BDRV_O_RDWR, &error_abort);
877
878 bdrv_set_backing_hd(src_overlay, src, &error_abort);
879 bdrv_unref(src);
880 bdrv_set_backing_hd(src, src_backing, &error_abort);
881 bdrv_unref(src_backing);
882
883 blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
884 blk_insert_bs(blk_src, src_overlay, &error_abort);
885
886 switch (drain_node) {
887 case TEST_JOB_DRAIN_SRC:
888 drain_bs = src;
889 break;
890 case TEST_JOB_DRAIN_SRC_CHILD:
891 drain_bs = src_backing;
892 break;
893 case TEST_JOB_DRAIN_SRC_PARENT:
894 drain_bs = src_overlay;
895 break;
896 default:
897 g_assert_not_reached();
898 }
899
900 if (use_iothread) {
901 iothread = iothread_new();
902 ctx = iothread_get_aio_context(iothread);
903 blk_set_aio_context(blk_src, ctx, &error_abort);
904 } else {
905 ctx = qemu_get_aio_context();
906 }
907
908 target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
909 &error_abort);
910 blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
911 blk_insert_bs(blk_target, target, &error_abort);
912 blk_set_allow_aio_context_change(blk_target, true);
913
914 aio_context_acquire(ctx);
915 tjob = block_job_create("job0", &test_job_driver, NULL, src,
916 0, BLK_PERM_ALL,
917 0, 0, NULL, NULL, &error_abort);
918 job = &tjob->common;
919 block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
920
921 switch (result) {
922 case TEST_JOB_SUCCESS:
923 break;
924 case TEST_JOB_FAIL_RUN:
925 tjob->run_ret = -EIO;
926 break;
927 case TEST_JOB_FAIL_PREPARE:
928 tjob->prepare_ret = -EIO;
929 break;
930 }
931
932 job_start(&job->job);
933 aio_context_release(ctx);
934
935 if (use_iothread) {
        /*
         * job_co_entry() runs in the I/O thread; wait for the actual job
         * code to start so that we don't catch the job in the pause point
         * of job_co_entry().
         */
939 while (!tjob->running) {
940 aio_poll(qemu_get_aio_context(), false);
941 }
942 }
943
944 g_assert_cmpint(job->job.pause_count, ==, 0);
945 g_assert_false(job->job.paused);
946 g_assert_true(tjob->running);
947 g_assert_true(job->job.busy);
948
949 do_drain_begin_unlocked(drain_type, drain_bs);
950
951 if (drain_type == BDRV_DRAIN_ALL) {
        /* bdrv_drain_all() drains both the source and the target */
953 g_assert_cmpint(job->job.pause_count, ==, 2);
954 } else {
955 g_assert_cmpint(job->job.pause_count, ==, 1);
956 }
957 g_assert_true(job->job.paused);
958 g_assert_false(job->job.busy);
959
960 do_drain_end_unlocked(drain_type, drain_bs);
961
962 if (use_iothread) {
        /* paused is reset in the I/O thread, wait for it */
964 while (job->job.paused) {
965 aio_poll(qemu_get_aio_context(), false);
966 }
967 }
968
969 g_assert_cmpint(job->job.pause_count, ==, 0);
970 g_assert_false(job->job.paused);
971 g_assert_true(job->job.busy);
972
973 do_drain_begin_unlocked(drain_type, target);
974
975 if (drain_type == BDRV_DRAIN_ALL) {
        /* bdrv_drain_all() drains both the source and the target */
977 g_assert_cmpint(job->job.pause_count, ==, 2);
978 } else {
979 g_assert_cmpint(job->job.pause_count, ==, 1);
980 }
981 g_assert_true(job->job.paused);
982 g_assert_false(job->job.busy);
983
984 do_drain_end_unlocked(drain_type, target);
985
986 if (use_iothread) {
        /* paused is reset in the I/O thread, wait for it */
988 while (job->job.paused) {
989 aio_poll(qemu_get_aio_context(), false);
990 }
991 }
992
993 g_assert_cmpint(job->job.pause_count, ==, 0);
994 g_assert_false(job->job.paused);
995 g_assert_true(job->job.busy);
996
997 aio_context_acquire(ctx);
998 ret = job_complete_sync(&job->job, &error_abort);
999 g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
1000
1001 if (use_iothread) {
1002 blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
1003 assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
1004 }
1005 aio_context_release(ctx);
1006
1007 blk_unref(blk_src);
1008 blk_unref(blk_target);
1009 bdrv_unref(src_overlay);
1010 bdrv_unref(target);
1011
1012 if (iothread) {
1013 iothread_join(iothread);
1014 }
1015}
1016
1017static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
1018 enum test_job_result result)
1019{
1020 test_blockjob_common_drain_node(drain_type, use_iothread, result,
1021 TEST_JOB_DRAIN_SRC);
1022 test_blockjob_common_drain_node(drain_type, use_iothread, result,
1023 TEST_JOB_DRAIN_SRC_CHILD);
1024 if (drain_type == BDRV_SUBTREE_DRAIN) {
1025 test_blockjob_common_drain_node(drain_type, use_iothread, result,
1026 TEST_JOB_DRAIN_SRC_PARENT);
1027 }
1028}
1029
1030static void test_blockjob_drain_all(void)
1031{
1032 test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
1033}
1034
1035static void test_blockjob_drain(void)
1036{
1037 test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
1038}
1039
1040static void test_blockjob_drain_subtree(void)
1041{
1042 test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_SUCCESS);
1043}
1044
1045static void test_blockjob_error_drain_all(void)
1046{
1047 test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
1048 test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
1049}
1050
1051static void test_blockjob_error_drain(void)
1052{
1053 test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
1054 test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
1055}
1056
1057static void test_blockjob_error_drain_subtree(void)
1058{
1059 test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_RUN);
1060 test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_PREPARE);
1061}
1062
1063static void test_blockjob_iothread_drain_all(void)
1064{
1065 test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
1066}
1067
1068static void test_blockjob_iothread_drain(void)
1069{
1070 test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
1071}
1072
1073static void test_blockjob_iothread_drain_subtree(void)
1074{
1075 test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_SUCCESS);
1076}
1077
1078static void test_blockjob_iothread_error_drain_all(void)
1079{
1080 test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
1081 test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
1082}
1083
1084static void test_blockjob_iothread_error_drain(void)
1085{
1086 test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
1087 test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
1088}
1089
1090static void test_blockjob_iothread_error_drain_subtree(void)
1091{
1092 test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_RUN);
1093 test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_PREPARE);
1094}
1095
1096
1097typedef struct BDRVTestTopState {
1098 BdrvChild *wait_child;
1099} BDRVTestTopState;
1100
1101static void bdrv_test_top_close(BlockDriverState *bs)
1102{
1103 BdrvChild *c, *next_c;
1104 QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
1105 bdrv_unref_child(bs, c);
1106 }
1107}
1108
1109static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs,
1110 int64_t offset, int64_t bytes,
1111 QEMUIOVector *qiov,
1112 BdrvRequestFlags flags)
1113{
1114 BDRVTestTopState *tts = bs->opaque;
1115 return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
1116}
1117
1118static BlockDriver bdrv_test_top_driver = {
1119 .format_name = "test_top_driver",
1120 .instance_size = sizeof(BDRVTestTopState),
1121
1122 .bdrv_close = bdrv_test_top_close,
1123 .bdrv_co_preadv = bdrv_test_top_co_preadv,
1124
1125 .bdrv_child_perm = bdrv_default_perms,
1126};
1127
1128typedef struct TestCoDeleteByDrainData {
1129 BlockBackend *blk;
1130 bool detach_instead_of_delete;
1131 bool done;
1132} TestCoDeleteByDrainData;
1133
1134static void coroutine_fn test_co_delete_by_drain(void *opaque)
1135{
1136 TestCoDeleteByDrainData *dbdd = opaque;
1137 BlockBackend *blk = dbdd->blk;
1138 BlockDriverState *bs = blk_bs(blk);
1139 BDRVTestTopState *tts = bs->opaque;
1140 void *buffer = g_malloc(65536);
1141 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);
1142
    /*
     * Pretend some internal write operation from parent to child.
     * Important: we have to read from the child, not from the parent!
     * Draining works by first propagating up the tree to the root and then
     * waiting for drainage from the root down to the leaves (protocol
     * nodes). If we had a request waiting on the root, everything would be
     * drained before we get back down the tree, but we want to be in the
     * middle of draining when the following request returns.
     */
1151 bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);
1152
1153 g_assert_cmpint(bs->refcnt, ==, 1);
1154
1155 if (!dbdd->detach_instead_of_delete) {
1156 blk_unref(blk);
1157 } else {
1158 BdrvChild *c, *next_c;
1159 QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
1160 bdrv_unref_child(bs, c);
1161 }
1162 }
1163
1164 dbdd->done = true;
1165 g_free(buffer);
1166}
1167
/*
 * Test what happens when some BDS has some children, you drain one of
 * them and this results in the BDS being deleted.
 *
 * If @detach_instead_of_delete is set, the BDS is not going to be
 * deleted but will only detach all of its children.
 */
1175static void do_test_delete_by_drain(bool detach_instead_of_delete,
1176 enum drain_type drain_type)
1177{
1178 BlockBackend *blk;
1179 BlockDriverState *bs, *child_bs, *null_bs;
1180 BDRVTestTopState *tts;
1181 TestCoDeleteByDrainData dbdd;
1182 Coroutine *co;
1183
1184 bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR,
1185 &error_abort);
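    /* Give the top node some size so that the 64 kB read below is valid */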
1186 bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
1187 tts = bs->opaque;
1188
1189 null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
1190 &error_abort);
1191 bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
1192 BDRV_CHILD_DATA, &error_abort);
1193
    /*
     * This child is the one requests are passed through to; it stalls them
     * until a drain occurs.
     */
1196 child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR,
1197 &error_abort);
1198 child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
1199
1200 tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
1201 &child_of_bds,
1202 BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
1203 &error_abort);
1204
    /*
     * This child is just there to be deleted
     * (for detach_instead_of_delete == true)
     */
1207 null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
1208 &error_abort);
1209 bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
1210 &error_abort);
1211
1212 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1213 blk_insert_bs(blk, bs, &error_abort);
1214
    /* Referenced by blk now */
1216 bdrv_unref(bs);
1217
1218 g_assert_cmpint(bs->refcnt, ==, 1);
1219 g_assert_cmpint(child_bs->refcnt, ==, 1);
1220 g_assert_cmpint(null_bs->refcnt, ==, 1);
1221
1222
1223 dbdd = (TestCoDeleteByDrainData){
1224 .blk = blk,
1225 .detach_instead_of_delete = detach_instead_of_delete,
1226 .done = false,
1227 };
1228 co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd);
1229 qemu_coroutine_enter(co);
1230
    /*
     * Drain the child while the read operation is still pending. The drain
     * makes the stalled request finish, and the coroutine then deletes the
     * top node (or merely detaches all of its children).
     */
1236 switch (drain_type) {
1237 case BDRV_DRAIN:
1238 bdrv_ref(child_bs);
1239 bdrv_drain(child_bs);
1240 bdrv_unref(child_bs);
1241 break;
1242 case BDRV_SUBTREE_DRAIN:
        /*
         * The subtree drain variant is only exercised for the detach case;
         * draining the to-be-deleted node itself would require taking an
         * extra reference just for this call.
         */
1246 assert(detach_instead_of_delete);
1247 bdrv_subtree_drained_begin(bs);
1248 bdrv_subtree_drained_end(bs);
1249 break;
1250 case BDRV_DRAIN_ALL:
1251 bdrv_drain_all_begin();
1252 bdrv_drain_all_end();
1253 break;
1254 default:
1255 g_assert_not_reached();
1256 }
1257
1258 while (!dbdd.done) {
1259 aio_poll(qemu_get_aio_context(), true);
1260 }
1261
1262 if (detach_instead_of_delete) {
        /*
         * The reference to blk was not handed over to the coroutine in this
         * mode, so we have to drop it ourselves.
         */
1265 blk_unref(blk);
1266 }
1267}
1268
1269static void test_delete_by_drain(void)
1270{
1271 do_test_delete_by_drain(false, BDRV_DRAIN);
1272}
1273
1274static void test_detach_by_drain_all(void)
1275{
1276 do_test_delete_by_drain(true, BDRV_DRAIN_ALL);
1277}
1278
1279static void test_detach_by_drain(void)
1280{
1281 do_test_delete_by_drain(true, BDRV_DRAIN);
1282}
1283
1284static void test_detach_by_drain_subtree(void)
1285{
1286 do_test_delete_by_drain(true, BDRV_SUBTREE_DRAIN);
1287}
1288
1289
1290struct detach_by_parent_data {
1291 BlockDriverState *parent_b;
1292 BdrvChild *child_b;
1293 BlockDriverState *c;
1294 BdrvChild *child_c;
1295 bool by_parent_cb;
1296};
1297static struct detach_by_parent_data detach_by_parent_data;
1298
1299static void detach_indirect_bh(void *opaque)
1300{
1301 struct detach_by_parent_data *data = opaque;
1302
1303 bdrv_unref_child(data->parent_b, data->child_b);
1304
1305 bdrv_ref(data->c);
1306 data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
1307 &child_of_bds, BDRV_CHILD_DATA,
1308 &error_abort);
1309}
1310
1311static void detach_by_parent_aio_cb(void *opaque, int ret)
1312{
1313 struct detach_by_parent_data *data = &detach_by_parent_data;
1314
1315 g_assert_cmpint(ret, ==, 0);
1316 if (data->by_parent_cb) {
1317 detach_indirect_bh(data);
1318 }
1319}
1320
1321static void detach_by_driver_cb_drained_begin(BdrvChild *child)
1322{
1323 aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
1324 detach_indirect_bh, &detach_by_parent_data);
1325 child_of_bds.drained_begin(child);
1326}
1327
1328static BdrvChildClass detach_by_driver_cb_class;
1329
/*
 * Initial graph:
 *
 * PA     PB
 *    \  /  \
 *     A     B     C
 *
 * A pending read request on PA (or, for by_parent_cb == false, PA's
 * .drained_begin callback) changes PB's children during the drained
 * section: B is detached and C is attached instead.
 *
 * by_parent_cb == true:  The graph change runs from the request's
 *     completion callback while bdrv_subtree_drained_begin(PB) waits for
 *     the request.
 *
 * by_parent_cb == false: The graph change is scheduled as a BH from the
 *     child's .drained_begin callback; it must only run once the drained
 *     section polls, not while the drain callbacks are still being invoked.
 *
 * Either way, by the time bdrv_subtree_drained_begin() returns, B must be
 * gone and C must be attached to PB.
 */
1350static void test_detach_indirect(bool by_parent_cb)
1351{
1352 BlockBackend *blk;
1353 BlockDriverState *parent_a, *parent_b, *a, *b, *c;
1354 BdrvChild *child_a, *child_b;
1355 BlockAIOCB *acb;
1356
1357 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
1358
1359 if (!by_parent_cb) {
1360 detach_by_driver_cb_class = child_of_bds;
1361 detach_by_driver_cb_class.drained_begin =
1362 detach_by_driver_cb_drained_begin;
1363 }
1364
    /* Create all involved nodes */
1366 parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR,
1367 &error_abort);
1368 parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0,
1369 &error_abort);
1370
1371 a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort);
1372 b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort);
1373 c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort);
1374
    /* blk is a BB for parent-a */
1376 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1377 blk_insert_bs(blk, parent_a, &error_abort);
1378 bdrv_unref(parent_a);
1379
    /*
     * Make the driver's drain_begin take some time, so that a premature
     * poll during the drain callbacks would get a chance to run the
     * scheduled BH.
     */
1382 if (!by_parent_cb) {
1383 BDRVTestState *s = parent_a->opaque;
1384 s->sleep_in_drain_begin = true;
1385 }
1386
    /* Set child relationships */
1388 bdrv_ref(b);
1389 bdrv_ref(a);
1390 child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
1391 BDRV_CHILD_DATA, &error_abort);
1392 child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,
1393 BDRV_CHILD_COW, &error_abort);
1394
1395 bdrv_ref(a);
1396 bdrv_attach_child(parent_a, a, "PA-A",
1397 by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
1398 BDRV_CHILD_DATA, &error_abort);
1399
1400 g_assert_cmpint(parent_a->refcnt, ==, 1);
1401 g_assert_cmpint(parent_b->refcnt, ==, 1);
1402 g_assert_cmpint(a->refcnt, ==, 3);
1403 g_assert_cmpint(b->refcnt, ==, 2);
1404 g_assert_cmpint(c->refcnt, ==, 1);
1405
1406 g_assert(QLIST_FIRST(&parent_b->children) == child_a);
1407 g_assert(QLIST_NEXT(child_a, next) == child_b);
1408 g_assert(QLIST_NEXT(child_b, next) == NULL);
1409
    /* Start the request that the drain below will have to wait for */
1411 detach_by_parent_data = (struct detach_by_parent_data) {
1412 .parent_b = parent_b,
1413 .child_b = child_b,
1414 .c = c,
1415 .by_parent_cb = by_parent_cb,
1416 };
1417 acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL);
1418 g_assert(acb != NULL);
1419
    /* Drain parent-b's subtree and check the expected graph change */
1421 bdrv_subtree_drained_begin(parent_b);
1422
1423 g_assert(detach_by_parent_data.child_c != NULL);
1424
1425 g_assert_cmpint(parent_a->refcnt, ==, 1);
1426 g_assert_cmpint(parent_b->refcnt, ==, 1);
1427 g_assert_cmpint(a->refcnt, ==, 3);
1428 g_assert_cmpint(b->refcnt, ==, 1);
1429 g_assert_cmpint(c->refcnt, ==, 2);
1430
1431 g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c);
1432 g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a);
1433 g_assert(QLIST_NEXT(child_a, next) == NULL);
1434
1435 g_assert_cmpint(parent_a->quiesce_counter, ==, 1);
1436 g_assert_cmpint(parent_b->quiesce_counter, ==, 1);
1437 g_assert_cmpint(a->quiesce_counter, ==, 1);
1438 g_assert_cmpint(b->quiesce_counter, ==, 0);
1439 g_assert_cmpint(c->quiesce_counter, ==, 1);
1440
1441 bdrv_subtree_drained_end(parent_b);
1442
1443 bdrv_unref(parent_b);
1444 blk_unref(blk);
1445
1446 g_assert_cmpint(a->refcnt, ==, 1);
1447 g_assert_cmpint(b->refcnt, ==, 1);
1448 g_assert_cmpint(c->refcnt, ==, 1);
1449 bdrv_unref(a);
1450 bdrv_unref(b);
1451 bdrv_unref(c);
1452}
1453
1454static void test_detach_by_parent_cb(void)
1455{
1456 test_detach_indirect(true);
1457}
1458
1459static void test_detach_by_driver_cb(void)
1460{
1461 test_detach_indirect(false);
1462}
1463
1464static void test_append_to_drained(void)
1465{
1466 BlockBackend *blk;
1467 BlockDriverState *base, *overlay;
1468 BDRVTestState *base_s, *overlay_s;
1469
1470 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1471 base = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
1472 base_s = base->opaque;
1473 blk_insert_bs(blk, base, &error_abort);
1474
1475 overlay = bdrv_new_open_driver(&bdrv_test, "overlay", BDRV_O_RDWR,
1476 &error_abort);
1477 overlay_s = overlay->opaque;
1478
1479 do_drain_begin(BDRV_DRAIN, base);
1480 g_assert_cmpint(base->quiesce_counter, ==, 1);
1481 g_assert_cmpint(base_s->drain_count, ==, 1);
1482 g_assert_cmpint(base->in_flight, ==, 0);
1483
1484 bdrv_append(overlay, base, &error_abort);
1485 g_assert_cmpint(base->in_flight, ==, 0);
1486 g_assert_cmpint(overlay->in_flight, ==, 0);
1487
1488 g_assert_cmpint(base->quiesce_counter, ==, 1);
1489 g_assert_cmpint(base_s->drain_count, ==, 1);
1490 g_assert_cmpint(overlay->quiesce_counter, ==, 1);
1491 g_assert_cmpint(overlay_s->drain_count, ==, 1);
1492
1493 do_drain_end(BDRV_DRAIN, base);
1494
1495 g_assert_cmpint(base->quiesce_counter, ==, 0);
1496 g_assert_cmpint(base_s->drain_count, ==, 0);
1497 g_assert_cmpint(overlay->quiesce_counter, ==, 0);
1498 g_assert_cmpint(overlay_s->drain_count, ==, 0);
1499
1500 bdrv_unref(overlay);
1501 bdrv_unref(base);
1502 blk_unref(blk);
1503}
1504
1505static void test_set_aio_context(void)
1506{
1507 BlockDriverState *bs;
1508 IOThread *a = iothread_new();
1509 IOThread *b = iothread_new();
1510 AioContext *ctx_a = iothread_get_aio_context(a);
1511 AioContext *ctx_b = iothread_get_aio_context(b);
1512
1513 bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
1514 &error_abort);
1515
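    /* Switch the drained node between the main loop and the two iothreads */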
1516 bdrv_drained_begin(bs);
1517 bdrv_try_set_aio_context(bs, ctx_a, &error_abort);
1518
1519 aio_context_acquire(ctx_a);
1520 bdrv_drained_end(bs);
1521
1522 bdrv_drained_begin(bs);
1523 bdrv_try_set_aio_context(bs, ctx_b, &error_abort);
1524 aio_context_release(ctx_a);
1525 aio_context_acquire(ctx_b);
1526 bdrv_try_set_aio_context(bs, qemu_get_aio_context(), &error_abort);
1527 aio_context_release(ctx_b);
1528 bdrv_drained_end(bs);
1529
1530 bdrv_unref(bs);
1531 iothread_join(a);
1532 iothread_join(b);
1533}
1534
1535
1536typedef struct TestDropBackingBlockJob {
1537 BlockJob common;
1538 bool should_complete;
1539 bool *did_complete;
1540 BlockDriverState *detach_also;
1541} TestDropBackingBlockJob;
1542
1543static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp)
1544{
1545 TestDropBackingBlockJob *s =
1546 container_of(job, TestDropBackingBlockJob, common.job);
1547
1548 while (!s->should_complete) {
1549 job_sleep_ns(job, 0);
1550 }
1551
1552 return 0;
1553}
1554
1555static void test_drop_backing_job_commit(Job *job)
1556{
1557 TestDropBackingBlockJob *s =
1558 container_of(job, TestDropBackingBlockJob, common.job);
1559
1560 bdrv_set_backing_hd(blk_bs(s->common.blk), NULL, &error_abort);
1561 bdrv_set_backing_hd(s->detach_also, NULL, &error_abort);
1562
1563 *s->did_complete = true;
1564}
1565
1566static const BlockJobDriver test_drop_backing_job_driver = {
1567 .job_driver = {
1568 .instance_size = sizeof(TestDropBackingBlockJob),
1569 .free = block_job_free,
1570 .user_resume = block_job_user_resume,
1571 .run = test_drop_backing_job_run,
1572 .commit = test_drop_backing_job_commit,
1573 }
1574};
/*
 * In this test, a block job runs on parent-node-2 and, on completion, drops
 * the backing link of both its own node and of parent-node-0 (all three
 * parent nodes share child-node as their backing file).
 *
 * The job is told to complete while child-node is drained. It must not
 * actually finish inside the drained section; only bdrv_drained_end() may
 * let it run to completion. The commit callback then detaches child-node
 * from two of its parents while drained_end may still be processing
 * child-node's parent list, which must not break the processing of the
 * remaining parents.
 */
1639static void test_blockjob_commit_by_drained_end(void)
1640{
1641 BlockDriverState *bs_child, *bs_parents[3];
1642 TestDropBackingBlockJob *job;
1643 bool job_has_completed = false;
1644 int i;
1645
1646 bs_child = bdrv_new_open_driver(&bdrv_test, "child-node", BDRV_O_RDWR,
1647 &error_abort);
1648
1649 for (i = 0; i < 3; i++) {
1650 char name[32];
1651 snprintf(name, sizeof(name), "parent-node-%i", i);
1652 bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR,
1653 &error_abort);
1654 bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort);
1655 }
1656
1657 job = block_job_create("job", &test_drop_backing_job_driver, NULL,
1658 bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL,
1659 &error_abort);
1660
1661 job->detach_also = bs_parents[0];
1662 job->did_complete = &job_has_completed;
1663
1664 job_start(&job->common.job);
1665
1666 job->should_complete = true;
1667 bdrv_drained_begin(bs_child);
1668 g_assert(!job_has_completed);
1669 bdrv_drained_end(bs_child);
1670 g_assert(job_has_completed);
1671
1672 bdrv_unref(bs_parents[0]);
1673 bdrv_unref(bs_parents[1]);
1674 bdrv_unref(bs_parents[2]);
1675 bdrv_unref(bs_child);
1676}
1677
1678
1679typedef struct TestSimpleBlockJob {
1680 BlockJob common;
1681 bool should_complete;
1682 bool *did_complete;
1683} TestSimpleBlockJob;
1684
1685static int coroutine_fn test_simple_job_run(Job *job, Error **errp)
1686{
1687 TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1688
1689 while (!s->should_complete) {
1690 job_sleep_ns(job, 0);
1691 }
1692
1693 return 0;
1694}
1695
1696static void test_simple_job_clean(Job *job)
1697{
1698 TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1699 *s->did_complete = true;
1700}
1701
1702static const BlockJobDriver test_simple_job_driver = {
1703 .job_driver = {
1704 .instance_size = sizeof(TestSimpleBlockJob),
1705 .free = block_job_free,
1706 .user_resume = block_job_user_resume,
1707 .run = test_simple_job_run,
1708 .clean = test_simple_job_clean,
1709 },
1710};
1711
1712static int drop_intermediate_poll_update_filename(BdrvChild *child,
1713 BlockDriverState *new_base,
1714 const char *filename,
1715 Error **errp)
1716{
    /*
     * We are free to poll here, which may change the block graph, if
     * it is not drained.
     */

    /* If the job is not drained: Complete it, schedule job_exit() */
1723 aio_poll(qemu_get_current_aio_context(), false);
    /* If the job is not drained: Run job_exit(), finish the job */
1725 aio_poll(qemu_get_current_aio_context(), false);
1726
1727 return 0;
1728}
1729
/*
 * Test a poll in the midst of bdrv_drop_intermediate().
 *
 * bdrv_drop_intermediate() invokes BdrvChildClass.update_filename(), which
 * in this test polls twice (see drop_intermediate_poll_update_filename()
 * above).
 *
 * The graph is a backing chain node-2 -> node-1 -> node-0, plus a separate
 * job-node whose backing file is node-1 and on which a simple block job
 * runs. The job is ready to complete before the drop starts.
 *
 * The polls inside update_filename() may let the job finish; that releases
 * the last reference to job-node and thereby detaches it from node-1 while
 * node-1 is being dropped. bdrv_drop_intermediate(node-1, node-0) must cope
 * with this: the test checks that the drop succeeds and that the job has
 * completed by the time it returns.
 */
1776static void test_drop_intermediate_poll(void)
1777{
1778 static BdrvChildClass chain_child_class;
1779 BlockDriverState *chain[3];
1780 TestSimpleBlockJob *job;
1781 BlockDriverState *job_node;
1782 bool job_has_completed = false;
1783 int i;
1784 int ret;
1785
1786 chain_child_class = child_of_bds;
1787 chain_child_class.update_filename = drop_intermediate_poll_update_filename;
1788
1789 for (i = 0; i < 3; i++) {
1790 char name[32];
1791 snprintf(name, 32, "node-%i", i);
1792
1793 chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort);
1794 }
1795
1796 job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR,
1797 &error_abort);
1798 bdrv_set_backing_hd(job_node, chain[1], &error_abort);
1799
    /*
     * Establish the backing chain (node-2 -> node-1 -> node-0) last, after
     * job-node is already attached, so that the chain links end up first in
     * the nodes' parent lists.
     */
1804 for (i = 0; i < 3; i++) {
1805 if (i) {
            /* Takes the reference to chain[i - 1] */
1807 chain[i]->backing = bdrv_attach_child(chain[i], chain[i - 1],
1808 "chain", &chain_child_class,
1809 BDRV_CHILD_COW, &error_abort);
1810 }
1811 }
1812
1813 job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
1814 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
1815
    /* The job holds a reference to job-node now, drop ours */
1817 bdrv_unref(job_node);
1818
1819 job->did_complete = &job_has_completed;
1820
1821 job_start(&job->common.job);
1822 job->should_complete = true;
1823
1824 g_assert(!job_has_completed);
1825 ret = bdrv_drop_intermediate(chain[1], chain[0], NULL);
1826 g_assert(ret == 0);
1827 g_assert(job_has_completed);
1828
1829 bdrv_unref(chain[2]);
1830}
1831
1832
1833typedef struct BDRVReplaceTestState {
1834 bool was_drained;
1835 bool was_undrained;
1836 bool has_read;
1837
1838 int drain_count;
1839
1840 bool yield_before_read;
1841 Coroutine *io_co;
1842 Coroutine *drain_co;
1843} BDRVReplaceTestState;
1844
1845static void bdrv_replace_test_close(BlockDriverState *bs)
1846{
1847}
1848
/*
 * If @bs has a backing file:
 *   Before the read:
 *   - Publish the request coroutine in s->io_co and, if s->yield_before_read
 *     is set, yield once (so a drain can start while the request is active)
 *   After reading from the backing file:
 *   - Set s->has_read
 *   - Wake up s->drain_co if a drain is waiting for this request
 *
 * Otherwise:
 *   Just set s->has_read and return success.
 */
1859static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs,
1860 int64_t offset,
1861 int64_t bytes,
1862 QEMUIOVector *qiov,
1863 BdrvRequestFlags flags)
1864{
1865 BDRVReplaceTestState *s = bs->opaque;
1866
1867 if (bs->backing) {
1868 int ret;
1869
1870 g_assert(!s->drain_count);
1871
1872 s->io_co = qemu_coroutine_self();
1873 if (s->yield_before_read) {
1874 s->yield_before_read = false;
1875 qemu_coroutine_yield();
1876 }
1877 s->io_co = NULL;
1878
1879 ret = bdrv_co_preadv(bs->backing, offset, bytes, qiov, 0);
1880 s->has_read = true;
1881
1882
1883 if (s->drain_co) {
1884 aio_co_wake(s->drain_co);
1885 }
1886
1887 return ret;
1888 }
1889
1890 s->has_read = true;
1891 return 0;
1892}
1893
/*
 * If s->drain_count is 0: wake up s->io_co if there is one, wait for the
 * request to finish, and set s->was_drained.
 * Increment s->drain_count.
 */
1899static void coroutine_fn bdrv_replace_test_co_drain_begin(BlockDriverState *bs)
1900{
1901 BDRVReplaceTestState *s = bs->opaque;
1902
1903 if (!s->drain_count) {
1904
1905 s->drain_co = qemu_coroutine_self();
1906 while (s->io_co) {
1907 aio_co_wake(s->io_co);
1908 s->io_co = NULL;
1909 qemu_coroutine_yield();
1910 }
1911 s->drain_co = NULL;
1912
1913 s->was_drained = true;
1914 }
1915 s->drain_count++;
1916}
1917
/*
 * Decrement s->drain_count; once it reaches 0, set s->was_undrained and, if
 * the node has a backing file, immediately issue a new read request.
 */
1923static void coroutine_fn bdrv_replace_test_co_drain_end(BlockDriverState *bs)
1924{
1925 BDRVReplaceTestState *s = bs->opaque;
1926
1927 g_assert(s->drain_count > 0);
1928 if (!--s->drain_count) {
1929 int ret;
1930
1931 s->was_undrained = true;
1932
1933 if (bs->backing) {
1934 char data;
1935 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1);
1936
            /* Issue a read request as soon as the node is undrained */
1938 ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0);
1939 g_assert(ret >= 0);
1940 }
1941 }
1942}
1943
1944static BlockDriver bdrv_replace_test = {
1945 .format_name = "replace_test",
1946 .instance_size = sizeof(BDRVReplaceTestState),
1947
1948 .bdrv_close = bdrv_replace_test_close,
1949 .bdrv_co_preadv = bdrv_replace_test_co_preadv,
1950
1951 .bdrv_co_drain_begin = bdrv_replace_test_co_drain_begin,
1952 .bdrv_co_drain_end = bdrv_replace_test_co_drain_end,
1953
1954 .bdrv_child_perm = bdrv_default_perms,
1955};
1956
1957static void coroutine_fn test_replace_child_mid_drain_read_co(void *opaque)
1958{
1959 int ret;
1960 char data;
1961
1962 ret = blk_co_pread(opaque, 0, 1, &data, 0);
1963 g_assert(ret >= 0);
1964}
1965
/*
 * Test what happens when a node has its child replaced while it may be
 * drained through that child.
 *
 * parent_bs has a single (backing) child, old_child_bs. old_child_bs is
 * drained @old_drain_count times and new_child_bs is drained
 * @new_drain_count times before bdrv_replace_node() swaps them.
 *
 * The parent must see exactly the drained_begin/drained_end callbacks that
 * the difference between the two drain counts implies, and a read request
 * started before the replacement (when the old child was not drained) must
 * eventually complete through the new child.
 */
1994static void do_test_replace_child_mid_drain(int old_drain_count,
1995 int new_drain_count)
1996{
1997 BlockBackend *parent_blk;
1998 BlockDriverState *parent_bs;
1999 BlockDriverState *old_child_bs, *new_child_bs;
2000 BDRVReplaceTestState *parent_s;
2001 BDRVReplaceTestState *old_child_s, *new_child_s;
2002 Coroutine *io_co;
2003 int i;
2004
2005 parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0,
2006 &error_abort);
2007 parent_s = parent_bs->opaque;
2008
2009 parent_blk = blk_new(qemu_get_aio_context(),
2010 BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
2011 blk_insert_bs(parent_blk, parent_bs, &error_abort);
2012
2013 old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0,
2014 &error_abort);
2015 new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0,
2016 &error_abort);
2017 old_child_s = old_child_bs->opaque;
2018 new_child_s = new_child_bs->opaque;
2019
    /* So that the one-byte reads below have something to read */
2021 parent_bs->total_sectors = 1;
2022 old_child_bs->total_sectors = 1;
2023 new_child_bs->total_sectors = 1;
2024
2025 bdrv_ref(old_child_bs);
2026 parent_bs->backing = bdrv_attach_child(parent_bs, old_child_bs, "child",
2027 &child_of_bds, BDRV_CHILD_COW,
2028 &error_abort);
2029
2030 for (i = 0; i < old_drain_count; i++) {
2031 bdrv_drained_begin(old_child_bs);
2032 }
2033 for (i = 0; i < new_drain_count; i++) {
2034 bdrv_drained_begin(new_child_bs);
2035 }
2036
2037 if (!old_drain_count) {
        /*
         * Start a read request that yields, so that it is still in flight
         * when the child is replaced.
         */
2042 parent_s->yield_before_read = true;
2043 io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co,
2044 parent_blk);
2045 qemu_coroutine_enter(io_co);
2046 }
2047
    /* If a read was started above, it must have yielded and not read yet */
2049 g_assert(!parent_s->has_read);
2050
    /* Reset the tracking so we only see what bdrv_replace_node() does */
2052 parent_s->was_drained = false;
2053 parent_s->was_undrained = false;
2054
2055 g_assert(parent_bs->quiesce_counter == old_drain_count);
2056 bdrv_replace_node(old_child_bs, new_child_bs, &error_abort);
2057 g_assert(parent_bs->quiesce_counter == new_drain_count);
2058
2059 if (!old_drain_count && !new_drain_count) {
        /*
         * From undrained to undrained drains and undrains the parent,
         * because bdrv_replace_node() keeps the old child drained while it
         * does its work.
         */
2065 g_assert(parent_s->was_drained && parent_s->was_undrained);
2066 } else if (!old_drain_count && new_drain_count) {
        /*
         * From undrained to drained must drain the parent, but must not
         * undrain it again.
         */
2071 g_assert(parent_s->was_drained && !parent_s->was_undrained);
2072 } else if (old_drain_count && !new_drain_count) {
        /*
         * From drained to undrained must undrain the parent, but must not
         * drain it first.
         */
2077 g_assert(!parent_s->was_drained && parent_s->was_undrained);
2078 } else {
        /*
         * From drained to drained must not touch the parent's drained state
         * at all.
         */
2083 g_assert(!parent_s->was_drained && !parent_s->was_undrained);
2084 }
2085
2086 if (!old_drain_count || !new_drain_count) {
        /*
         * If !old_drain_count, a read request was started before
         * bdrv_replace_node(). If !new_drain_count, the parent was
         * undrained at some point during the replacement, and
         * bdrv_replace_test_co_drain_end() issues a read then. Either way,
         * the parent must have read something by now.
         */
2094 g_assert(parent_s->has_read);
2095 } else {
        /*
         * If the parent was never undrained, no read request can have been
         * issued on it.
         */
2100 g_assert(!parent_s->has_read);
2101 }
2102
    /* A drained child must not have received any requests */
2104 g_assert(!(old_drain_count && old_child_s->has_read));
2105 g_assert(!(new_drain_count && new_child_s->has_read));
2106
2107 for (i = 0; i < new_drain_count; i++) {
2108 bdrv_drained_end(new_child_bs);
2109 }
2110 for (i = 0; i < old_drain_count; i++) {
2111 bdrv_drained_end(old_child_bs);
2112 }
2113
    /*
     * By now the parent has been undrained (all drained sections above have
     * ended), so bdrv_replace_test_co_drain_end() must have issued a read
     * through the new child.
     */
2118 g_assert(parent_s->has_read);
2119 g_assert(new_child_s->has_read);
2120
2121 blk_unref(parent_blk);
2122 bdrv_unref(parent_bs);
2123 bdrv_unref(old_child_bs);
2124 bdrv_unref(new_child_bs);
2125}
2126
2127static void test_replace_child_mid_drain(void)
2128{
2129 int old_drain_count, new_drain_count;
2130
2131 for (old_drain_count = 0; old_drain_count < 2; old_drain_count++) {
2132 for (new_drain_count = 0; new_drain_count < 2; new_drain_count++) {
2133 do_test_replace_child_mid_drain(old_drain_count, new_drain_count);
2134 }
2135 }
2136}
2137
2138int main(int argc, char **argv)
2139{
2140 int ret;
2141
2142 bdrv_init();
2143 qemu_init_main_loop(&error_abort);
2144
2145 g_test_init(&argc, &argv, NULL);
2146 qemu_event_init(&done_event, false);
2147
2148 g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
2149 g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
2150 g_test_add_func("/bdrv-drain/driver-cb/drain_subtree",
2151 test_drv_cb_drain_subtree);
2152
2153 g_test_add_func("/bdrv-drain/driver-cb/co/drain_all",
2154 test_drv_cb_co_drain_all);
2155 g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain);
2156 g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree",
2157 test_drv_cb_co_drain_subtree);
2158
2159
2160 g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
2161 g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
2162 g_test_add_func("/bdrv-drain/quiesce/drain_subtree",
2163 test_quiesce_drain_subtree);
2164
2165 g_test_add_func("/bdrv-drain/quiesce/co/drain_all",
2166 test_quiesce_co_drain_all);
2167 g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain);
2168 g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree",
2169 test_quiesce_co_drain_subtree);
2170
2171 g_test_add_func("/bdrv-drain/nested", test_nested);
2172 g_test_add_func("/bdrv-drain/multiparent", test_multiparent);
2173
2174 g_test_add_func("/bdrv-drain/graph-change/drain_subtree",
2175 test_graph_change_drain_subtree);
2176 g_test_add_func("/bdrv-drain/graph-change/drain_all",
2177 test_graph_change_drain_all);
2178
2179 g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all);
2180 g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain);
2181 g_test_add_func("/bdrv-drain/iothread/drain_subtree",
2182 test_iothread_drain_subtree);
2183
2184 g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
2185 g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
2186 g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
2187 test_blockjob_drain_subtree);
2188
2189 g_test_add_func("/bdrv-drain/blockjob/error/drain_all",
2190 test_blockjob_error_drain_all);
2191 g_test_add_func("/bdrv-drain/blockjob/error/drain",
2192 test_blockjob_error_drain);
2193 g_test_add_func("/bdrv-drain/blockjob/error/drain_subtree",
2194 test_blockjob_error_drain_subtree);
2195
2196 g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
2197 test_blockjob_iothread_drain_all);
2198 g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
2199 test_blockjob_iothread_drain);
2200 g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree",
2201 test_blockjob_iothread_drain_subtree);
2202
2203 g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all",
2204 test_blockjob_iothread_error_drain_all);
2205 g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain",
2206 test_blockjob_iothread_error_drain);
2207 g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_subtree",
2208 test_blockjob_iothread_error_drain_subtree);
2209
2210 g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
2211 g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
2212 g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
2213 g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree);
2214 g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb);
2215 g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb);
2216
2217 g_test_add_func("/bdrv-drain/attach/drain", test_append_to_drained);
2218
2219 g_test_add_func("/bdrv-drain/set_aio_context", test_set_aio_context);
2220
2221 g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end",
2222 test_blockjob_commit_by_drained_end);
2223
2224 g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll",
2225 test_drop_intermediate_poll);
2226
2227 g_test_add_func("/bdrv-drain/replace_child/mid-drain",
2228 test_replace_child_mid_drain);
2229
2230 ret = g_test_run();
2231 qemu_event_destroy(&done_event);
2232 return ret;
2233}
2234