1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include "qemu/osdep.h"
26#include "block/block.h"
27#include "block/blockjob_int.h"
28#include "sysemu/block-backend.h"
29#include "qapi/error.h"
30#include "iothread.h"
31
/* Signalled by worker-thread BHs / AIO callbacks to wake the main thread */
static QemuEvent done_event;

typedef struct BDRVTestState {
    int drain_count;                /* balance of drain_begin/drain_end calls */
    AioContext *bh_indirection_ctx; /* if set, preadv re-enters via a BH here */
    bool sleep_in_drain_begin;      /* make .bdrv_co_drain_begin yield a bit */
} BDRVTestState;
39
/*
 * Driver callback: a drained section around this node begins.
 * Counts the call; optionally yields for a while so tests can observe
 * a drain_begin that does not finish instantly.
 */
static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count++;
    if (s->sleep_in_drain_begin) {
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
    }
}
48
49static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs)
50{
51 BDRVTestState *s = bs->opaque;
52 s->drain_count--;
53}
54
/*
 * Driver close callback.  Asserts that the node is inside a drained
 * section when it is closed (drain_count must still be positive).
 */
static void bdrv_test_close(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    g_assert_cmpint(s->drain_count, >, 0);
}
60
/* BH that re-enters the coroutine passed as @opaque in its AioContext */
static void co_reenter_bh(void *opaque)
{
    aio_co_wake(opaque);
}
65
/*
 * Driver read callback.  Performs no real I/O; it only delays completion so
 * that drain code has an in-flight request to wait for.
 */
static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BDRVTestState *s = bs->opaque;

    /* Keep the request in flight long enough for the drain polling loop to
     * have to wait for it */
    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);

    /* If configured, complete indirectly: schedule a BH in another
     * AioContext that re-enters this coroutine (used by iothread tests) */
    if (s->bh_indirection_ctx) {
        aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
                                qemu_coroutine_self());
        qemu_coroutine_yield();
    }

    return 0;
}
86
/*
 * Permission callback that delegates to bdrv_format_default_perms().
 * That helper only understands child_file and child_backing roles, so any
 * other role is disguised as child_file before delegating.
 */
static void bdrv_test_child_perm(BlockDriverState *bs, BdrvChild *c,
                                 const BdrvChildRole *role,
                                 BlockReopenQueue *reopen_queue,
                                 uint64_t perm, uint64_t shared,
                                 uint64_t *nperm, uint64_t *nshared)
{
    if (role != &child_file && role != &child_backing) {
        role = &child_file;
    }

    bdrv_format_default_perms(bs, c, role, reopen_queue, perm, shared,
                              nperm, nshared);
}
102
/* Minimal test block driver: counts drains and fakes slow reads */
static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = sizeof(BDRVTestState),

    .bdrv_close             = bdrv_test_close,
    .bdrv_co_preadv         = bdrv_test_co_preadv,

    .bdrv_co_drain_begin    = bdrv_test_co_drain_begin,
    .bdrv_co_drain_end      = bdrv_test_co_drain_end,

    .bdrv_child_perm        = bdrv_test_child_perm,
};
115
/* AIO completion callback: stores @ret into the int that @opaque points to */
static void aio_ret_cb(void *opaque, int ret)
{
    int *ret_slot = opaque;

    *ret_slot = ret;
}
121
/* State shared between call_in_coroutine() and its coroutine entry point */
typedef struct CallInCoroutineData {
    void (*entry)(void);    /* function to run in coroutine context */
    bool done;              /* set when entry() has returned */
} CallInCoroutineData;

/* Coroutine trampoline: run data->entry() and flag completion */
static coroutine_fn void call_in_coroutine_entry(void *opaque)
{
    CallInCoroutineData *data = opaque;

    data->entry();
    data->done = true;
}
134
/*
 * Run @entry inside a coroutine and poll the main AioContext until it has
 * finished.  Lets the same test body be exercised in coroutine context.
 */
static void call_in_coroutine(void (*entry)(void))
{
    Coroutine *co;
    CallInCoroutineData data = {
        .entry = entry,
        .done = false,
    };

    co = qemu_coroutine_create(call_in_coroutine_entry, &data);
    qemu_coroutine_enter(co);
    while (!data.done) {
        aio_poll(qemu_get_aio_context(), true);
    }
}
149
/* The drain flavours exercised by these tests */
enum drain_type {
    BDRV_DRAIN_ALL,     /* bdrv_drain_all_begin/end */
    BDRV_DRAIN,         /* bdrv_drained_begin/end on one node */
    BDRV_SUBTREE_DRAIN, /* bdrv_subtree_drained_begin/end */
    DRAIN_TYPE_MAX,
};
156
157static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
158{
159 switch (drain_type) {
160 case BDRV_DRAIN_ALL: bdrv_drain_all_begin(); break;
161 case BDRV_DRAIN: bdrv_drained_begin(bs); break;
162 case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_begin(bs); break;
163 default: g_assert_not_reached();
164 }
165}
166
167static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
168{
169 switch (drain_type) {
170 case BDRV_DRAIN_ALL: bdrv_drain_all_end(); break;
171 case BDRV_DRAIN: bdrv_drained_end(bs); break;
172 case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_end(bs); break;
173 default: g_assert_not_reached();
174 }
175}
176
177static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
178{
179 if (drain_type != BDRV_DRAIN_ALL) {
180 aio_context_acquire(bdrv_get_aio_context(bs));
181 }
182 do_drain_begin(drain_type, bs);
183 if (drain_type != BDRV_DRAIN_ALL) {
184 aio_context_release(bdrv_get_aio_context(bs));
185 }
186}
187
188static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
189{
190 if (drain_type != BDRV_DRAIN_ALL) {
191 aio_context_acquire(bdrv_get_aio_context(bs));
192 }
193 do_drain_end(drain_type, bs);
194 if (drain_type != BDRV_DRAIN_ALL) {
195 aio_context_release(bdrv_get_aio_context(bs));
196 }
197}
198
/*
 * Check that the driver's drain_begin/drain_end callbacks fire for the
 * given drain type, both on an idle node and while a request is pending.
 * @recursive says whether the backing node is expected to be drained too.
 */
static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;
    BDRVTestState *s, *backing_s;
    BlockAIOCB *acb;
    int aio_ret;

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs, backing, &error_abort);

    /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    /* Now do the same while a request is pending */
    aio_ret = -EINPROGRESS;
    acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    /* Draining must have completed the pending request */
    g_assert_cmpint(aio_ret, ==, 0);
    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}
257
/* Per-drain-type instantiations of test_drv_cb_common() */
static void test_drv_cb_drain_all(void)
{
    test_drv_cb_common(BDRV_DRAIN_ALL, true);
}

static void test_drv_cb_drain(void)
{
    test_drv_cb_common(BDRV_DRAIN, false);
}

static void test_drv_cb_drain_subtree(void)
{
    test_drv_cb_common(BDRV_SUBTREE_DRAIN, true);
}

/* Same tests, but run from coroutine context */
static void test_drv_cb_co_drain_all(void)
{
    call_in_coroutine(test_drv_cb_drain_all);
}

static void test_drv_cb_co_drain(void)
{
    call_in_coroutine(test_drv_cb_drain);
}

static void test_drv_cb_co_drain_subtree(void)
{
    call_in_coroutine(test_drv_cb_drain_subtree);
}
287
/*
 * Check that quiesce_counter on the node (and, if @recursive, on its
 * backing node) is raised and lowered by the given drain type.
 */
static void test_quiesce_common(enum drain_type drain_type, bool recursive)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    bdrv_set_backing_hd(bs, backing, &error_abort);

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(bs->quiesce_counter, ==, 1);
    g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}
318
/* Per-drain-type instantiations of test_quiesce_common() */
static void test_quiesce_drain_all(void)
{
    test_quiesce_common(BDRV_DRAIN_ALL, true);
}

static void test_quiesce_drain(void)
{
    test_quiesce_common(BDRV_DRAIN, false);
}

static void test_quiesce_drain_subtree(void)
{
    test_quiesce_common(BDRV_SUBTREE_DRAIN, true);
}

/* Same tests, but run from coroutine context */
static void test_quiesce_co_drain_all(void)
{
    call_in_coroutine(test_quiesce_drain_all);
}

static void test_quiesce_co_drain(void)
{
    call_in_coroutine(test_quiesce_drain);
}

static void test_quiesce_co_drain_subtree(void)
{
    call_in_coroutine(test_quiesce_drain_subtree);
}
348
/*
 * Nest every combination of two drain types around the same node and check
 * that quiesce counters and driver callbacks add up.  The backing node is
 * drained once per nesting level, except for plain BDRV_DRAIN, which is
 * non-recursive.
 */
static void test_nested(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;
    BDRVTestState *s, *backing_s;
    enum drain_type outer, inner;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs, backing, &error_abort);

    for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
        for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
            /* BDRV_DRAIN does not recurse into the backing file */
            int backing_quiesce = (outer != BDRV_DRAIN) +
                                  (inner != BDRV_DRAIN);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);

            do_drain_begin(outer, bs);
            do_drain_begin(inner, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 2);
            g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
            g_assert_cmpint(s->drain_count, ==, 2);
            g_assert_cmpint(backing_s->drain_count, ==, backing_quiesce);

            do_drain_end(inner, bs);
            do_drain_end(outer, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);
        }
    }

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}
398
/*
 * Two trees (bs_a, bs_b) share one backing node.  A subtree drain on either
 * root must propagate through the shared backing node into the other tree,
 * so all three nodes always share the same quiesce/drain counts.
 */
static void test_multiparent(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b, *backing;
    BDRVTestState *a_s, *b_s, *backing_s;

    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    /* The backing node is shared by both trees */
    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs_a, backing, &error_abort);
    bdrv_set_backing_hd(bs_b, backing, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);

    /* Draining subtree of bs_a reaches bs_b through the shared backing */
    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(backing->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, 1);

    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 2);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
    g_assert_cmpint(backing->quiesce_counter, ==, 2);
    g_assert_cmpint(a_s->drain_count, ==, 2);
    g_assert_cmpint(b_s->drain_count, ==, 2);
    g_assert_cmpint(backing_s->drain_count, ==, 2);

    do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(backing->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, 1);

    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs_a);
    bdrv_unref(bs_b);
    blk_unref(blk_a);
    blk_unref(blk_b);
}
471
/*
 * Change the graph (attach/detach bs_b's backing file) while subtree drains
 * are active and check that the quiesce/drain counters are transferred
 * correctly: a node entering a drained subtree picks up the drain count of
 * its new parents, and loses it again when detached.
 */
static void test_graph_change_drain_subtree(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b, *backing;
    BDRVTestState *a_s, *b_s, *backing_s;

    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    /* Initially only bs_a has the backing file */
    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs_a, backing, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    /* 3 drains on the bs_a subtree, 2 on the bs_b subtree */
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);

    /* Sharing the backing file merges both subtrees: every node now sees
     * all five drains */
    bdrv_set_backing_hd(bs_b, backing, &error_abort);
    g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
    g_assert_cmpint(backing->quiesce_counter, ==, 5);
    g_assert_cmpint(a_s->drain_count, ==, 5);
    g_assert_cmpint(b_s->drain_count, ==, 5);
    g_assert_cmpint(backing_s->drain_count, ==, 5);

    /* Detaching splits them again: bs_b keeps its own 2 drains, the
     * bs_a/backing side keeps its 3 */
    bdrv_set_backing_hd(bs_b, NULL, &error_abort);
    g_assert_cmpint(bs_a->quiesce_counter, ==, 3);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
    g_assert_cmpint(backing->quiesce_counter, ==, 3);
    g_assert_cmpint(a_s->drain_count, ==, 3);
    g_assert_cmpint(b_s->drain_count, ==, 2);
    g_assert_cmpint(backing_s->drain_count, ==, 3);

    /* Re-attach and merge counts again */
    bdrv_set_backing_hd(bs_b, backing, &error_abort);
    g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
    g_assert_cmpint(backing->quiesce_counter, ==, 5);
    g_assert_cmpint(a_s->drain_count, ==, 5);
    g_assert_cmpint(b_s->drain_count, ==, 5);
    g_assert_cmpint(backing_s->drain_count, ==, 5);

    do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs_a);
    bdrv_unref(bs_b);
    blk_unref(blk_a);
    blk_unref(blk_b);
}
550
/*
 * Create and delete nodes while bdrv_drain_all() is in effect: new nodes
 * must come up drained, deleted nodes must not disturb the section.
 */
static void test_graph_change_drain_all(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b;
    BDRVTestState *a_s, *b_s;

    /* Create node A with a BlockBackend */
    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);

    /* Drain the whole graph */
    bdrv_drain_all_begin();

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);

    /* Create node B, inside the drained section; it must start drained */
    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* Unref the BlockBackend of node A; counts must be unchanged */
    blk_unref(blk_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* Delete node A while the section is still active */
    bdrv_unref(bs_a);

    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* End the drained section */
    bdrv_drain_all_end();

    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);

    bdrv_unref(bs_b);
    blk_unref(blk_b);
}
607
/* Parameters handed to BHs scheduled in other threads by the iothread tests */
struct test_iothread_data {
    BlockDriverState *bs;       /* node to drain / flush */
    enum drain_type drain_type; /* which drain flavour to use */
    int *aio_ret;               /* where aio_ret_cb stored the AIO result */
};
613
/*
 * Runs in an iothread: drain data->bs under its AioContext lock, check
 * that the pending request completed, undrain, and wake the main thread.
 */
static void test_iothread_drain_entry(void *opaque)
{
    struct test_iothread_data *data = opaque;

    aio_context_acquire(bdrv_get_aio_context(data->bs));
    do_drain_begin(data->drain_type, data->bs);
    g_assert_cmpint(*data->aio_ret, ==, 0);
    do_drain_end(data->drain_type, data->bs);
    aio_context_release(bdrv_get_aio_context(data->bs));

    qemu_event_set(&done_event);
}
626
627static void test_iothread_aio_cb(void *opaque, int ret)
628{
629 int *aio_ret = opaque;
630 *aio_ret = ret;
631 qemu_event_set(&done_event);
632}
633
/*
 * BH run in the main thread while a drain is in progress elsewhere:
 * flushes the node under its AioContext lock to provoke activity that
 * the drain code has to cope with.
 */
static void test_iothread_main_thread_bh(void *opaque)
{
    struct test_iothread_data *data = opaque;

    aio_context_acquire(bdrv_get_aio_context(data->bs));
    bdrv_flush(data->bs);
    aio_context_release(bdrv_get_aio_context(data->bs));
}
644
/*
 * Starts an AIO request on a node that runs in the AioContext of iothread a.
 * The request is re-entered through a BH in iothread b before it completes
 * (see bdrv_test_co_preadv()).
 *
 * @drain_thread == 0: do_drain_begin/end are called from the main thread.
 * @drain_thread == 1: they are called from iothread a (via a BH).
 */
static void test_iothread_common(enum drain_type drain_type, int drain_thread)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    BDRVTestState *s;
    BlockAIOCB *acb;
    int aio_ret;
    struct test_iothread_data data;

    IOThread *a = iothread_new();
    IOThread *b = iothread_new();
    AioContext *ctx_a = iothread_get_aio_context(a);
    AioContext *ctx_b = iothread_get_aio_context(b);

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    /* bdrv_drain_all() may only be called from the main loop thread */
    if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
        goto out;
    }

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    blk_set_aio_context(blk, ctx_a, &error_abort);
    aio_context_acquire(ctx_a);

    /* Make the read complete through a BH in iothread b */
    s->bh_indirection_ctx = ctx_b;

    aio_ret = -EINPROGRESS;
    qemu_event_reset(&done_event);

    if (drain_thread == 0) {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
    } else {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    }
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    aio_context_release(ctx_a);

    data = (struct test_iothread_data) {
        .bs         = bs,
        .drain_type = drain_type,
        .aio_ret    = &aio_ret,
    };

    switch (drain_thread) {
    case 0:
        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_acquire(ctx_a);
        }

        /* Let some additional main-loop activity (a flush) happen while the
         * drain is in progress */
        aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);

        /* The request is running in iothread a.  Draining the node from the
         * main thread must wait until the request has completed. */
        do_drain_begin(drain_type, bs);
        g_assert_cmpint(bs->in_flight, ==, 0);

        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_release(ctx_a);
        }
        /* Wait until the AIO completion callback has run */
        qemu_event_wait(&done_event);
        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_acquire(ctx_a);
        }

        g_assert_cmpint(aio_ret, ==, 0);
        do_drain_end(drain_type, bs);

        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_release(ctx_a);
        }
        break;
    case 1:
        /* Drain from iothread a itself; it signals done_event when done */
        aio_bh_schedule_oneshot(ctx_a, test_iothread_drain_entry, &data);
        qemu_event_wait(&done_event);
        break;
    default:
        g_assert_not_reached();
    }

    aio_context_acquire(ctx_a);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx_a);

    bdrv_unref(bs);
    blk_unref(blk);

out:
    iothread_join(a);
    iothread_join(b);
}
755
/* Run test_iothread_common() from both the main thread and the iothread */
static void test_iothread_drain_all(void)
{
    test_iothread_common(BDRV_DRAIN_ALL, 0);
    test_iothread_common(BDRV_DRAIN_ALL, 1);
}

static void test_iothread_drain(void)
{
    test_iothread_common(BDRV_DRAIN, 0);
    test_iothread_common(BDRV_DRAIN, 1);
}

static void test_iothread_drain_subtree(void)
{
    test_iothread_common(BDRV_SUBTREE_DRAIN, 0);
    test_iothread_common(BDRV_SUBTREE_DRAIN, 1);
}
773
774
/* A block job that loops until asked to complete, for drain/pause tests */
typedef struct TestBlockJob {
    BlockJob common;
    int run_ret;          /* value .run() returns after completion */
    int prepare_ret;      /* value .prepare() returns (error injection) */
    bool running;         /* set once the job coroutine has started */
    bool should_complete; /* set by test_job_complete() to end the loop */
} TestBlockJob;
782
/*
 * Completion-phase callbacks.  Each flushes the job's BlockBackend to
 * provoke I/O activity during job completion, which must work even inside
 * the drained section that job completion runs in.
 */
static int test_job_prepare(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    blk_flush(s->common.blk);
    return s->prepare_ret;   /* may inject an error (TEST_JOB_FAIL_PREPARE) */
}

static void test_job_commit(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    blk_flush(s->common.blk);
}

static void test_job_abort(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    blk_flush(s->common.blk);
}
807
/*
 * Job main loop: announce readiness, then spin (busily sleeping) until
 * should_complete is set, honouring pause points so drain can pause the job.
 */
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Tell the waiting test code that the job coroutine is running */
    s->running = true;

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        /* Avoid job_sleep_ns() because it marks the job as !busy.  We want
         * the job to stay busy between pause points so the drain code has
         * real activity to wait for. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    return s->run_ret;  /* may inject an error (TEST_JOB_FAIL_RUN) */
}
828
/* .complete callback: ask the main loop in test_job_run() to finish */
static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}
834
/* Driver table for the test block job defined above */
BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .drain          = block_job_drain,
        .run            = test_job_run,
        .complete       = test_job_complete,
        .prepare        = test_job_prepare,
        .commit         = test_job_commit,
        .abort          = test_job_abort,
    },
};
848
/* Outcome requested of the test job (success, or error injection point) */
enum test_job_result {
    TEST_JOB_SUCCESS,
    TEST_JOB_FAIL_RUN,      /* .run() returns -EIO */
    TEST_JOB_FAIL_PREPARE,  /* .prepare() returns -EIO */
};

/* Which node of the source chain the drain is applied to */
enum test_job_drain_node {
    TEST_JOB_DRAIN_SRC,         /* the job's main node */
    TEST_JOB_DRAIN_SRC_CHILD,   /* its backing file */
    TEST_JOB_DRAIN_SRC_PARENT,  /* its overlay */
};
860
/*
 * Run a block job over a source chain (overlay -> src -> backing) and a
 * separate target node, optionally in an iothread, and check that draining
 * @drain_node (or the target) pauses the job exactly once per drained node
 * and resumes it afterwards.  Finally complete the job and verify the
 * requested @result.
 */
static void test_blockjob_common_drain_node(enum drain_type drain_type,
                                            bool use_iothread,
                                            enum test_job_result result,
                                            enum test_job_drain_node drain_node)
{
    BlockBackend *blk_src, *blk_target;
    BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
    BlockJob *job;
    TestBlockJob *tjob;
    IOThread *iothread = NULL;
    AioContext *ctx;
    int ret;

    src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
                               &error_abort);
    src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
                                       BDRV_O_RDWR, &error_abort);
    src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
                                       BDRV_O_RDWR, &error_abort);

    /* Build the chain; references to src and src_backing are then held
     * through their parents, so the direct references are dropped */
    bdrv_set_backing_hd(src_overlay, src, &error_abort);
    bdrv_unref(src);
    bdrv_set_backing_hd(src, src_backing, &error_abort);
    bdrv_unref(src_backing);

    blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_src, src_overlay, &error_abort);

    switch (drain_node) {
    case TEST_JOB_DRAIN_SRC:
        drain_bs = src;
        break;
    case TEST_JOB_DRAIN_SRC_CHILD:
        drain_bs = src_backing;
        break;
    case TEST_JOB_DRAIN_SRC_PARENT:
        drain_bs = src_overlay;
        break;
    default:
        g_assert_not_reached();
    }

    if (use_iothread) {
        iothread = iothread_new();
        ctx = iothread_get_aio_context(iothread);
        blk_set_aio_context(blk_src, ctx, &error_abort);
    } else {
        ctx = qemu_get_aio_context();
    }

    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);
    blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_target, target, &error_abort);
    blk_set_allow_aio_context_change(blk_target, true);

    aio_context_acquire(ctx);
    tjob = block_job_create("job0", &test_job_driver, NULL, src,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job = &tjob->common;
    block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);

    switch (result) {
    case TEST_JOB_SUCCESS:
        break;
    case TEST_JOB_FAIL_RUN:
        tjob->run_ret = -EIO;
        break;
    case TEST_JOB_FAIL_PREPARE:
        tjob->prepare_ret = -EIO;
        break;
    }

    job_start(&job->job);
    aio_context_release(ctx);

    if (use_iothread) {
        /* job_co_entry() is run in the I/O thread; wait for the actual job
         * code to have started before poking at its state */
        while (!tjob->running) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    g_assert_cmpint(job->job.pause_count, ==, 0);
    g_assert_false(job->job.paused);
    g_assert_true(tjob->running);
    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */

    do_drain_begin_unlocked(drain_type, drain_bs);

    if (drain_type == BDRV_DRAIN_ALL) {
        /* bdrv_drain_all() drains both src and target */
        g_assert_cmpint(job->job.pause_count, ==, 2);
    } else {
        g_assert_cmpint(job->job.pause_count, ==, 1);
    }
    g_assert_true(job->job.paused);
    g_assert_false(job->job.busy); /* The job is paused */

    do_drain_end_unlocked(drain_type, drain_bs);

    if (use_iothread) {
        /* paused is reset in the I/O thread, wait for it */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    g_assert_cmpint(job->job.pause_count, ==, 0);
    g_assert_false(job->job.paused);
    g_assert_true(job->job.busy);

    /* Now drain the target node instead */
    do_drain_begin_unlocked(drain_type, target);

    if (drain_type == BDRV_DRAIN_ALL) {
        /* bdrv_drain_all() drains both src and target */
        g_assert_cmpint(job->job.pause_count, ==, 2);
    } else {
        g_assert_cmpint(job->job.pause_count, ==, 1);
    }
    g_assert_true(job->job.paused);
    g_assert_false(job->job.busy);

    do_drain_end_unlocked(drain_type, target);

    if (use_iothread) {
        /* paused is reset in the I/O thread, wait for it */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    g_assert_cmpint(job->job.pause_count, ==, 0);
    g_assert_false(job->job.paused);
    g_assert_true(job->job.busy);

    aio_context_acquire(ctx);
    ret = job_complete_sync(&job->job, &error_abort);
    g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

    if (use_iothread) {
        blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
        assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
    }
    aio_context_release(ctx);

    blk_unref(blk_src);
    blk_unref(blk_target);
    bdrv_unref(src_overlay);
    bdrv_unref(target);

    if (iothread) {
        iothread_join(iothread);
    }
}
1019
/*
 * Run the block job test against each relevant drain node.  Draining the
 * overlay only reaches the job via a subtree drain, so SRC_PARENT is only
 * exercised for BDRV_SUBTREE_DRAIN.
 */
static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
                                 enum test_job_result result)
{
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC);
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC_CHILD);
    if (drain_type == BDRV_SUBTREE_DRAIN) {
        test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                        TEST_JOB_DRAIN_SRC_PARENT);
    }
}
1032
/* Instantiations of test_blockjob_common() for every drain type, with and
 * without an iothread, and with injected run/prepare errors */
static void test_blockjob_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_error_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_error_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}
1098
1099
/* State for the "top" driver used by the delete-by-drain tests */
typedef struct BDRVTestTopState {
    BdrvChild *wait_child;  /* child that internal reads go through */
} BDRVTestTopState;

/* Close callback: drop all remaining children of the node */
static void bdrv_test_top_close(BlockDriverState *bs)
{
    BdrvChild *c, *next_c;
    QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
        bdrv_unref_child(bs, c);
    }
}
1111
/* Read callback: forward the request to the configured wait_child */
static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs,
                                                uint64_t offset, uint64_t bytes,
                                                QEMUIOVector *qiov, int flags)
{
    BDRVTestTopState *tts = bs->opaque;
    return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
}
1119
/* Driver table for the "top" node of the delete-by-drain tests */
static BlockDriver bdrv_test_top_driver = {
    .format_name            = "test_top_driver",
    .instance_size          = sizeof(BDRVTestTopState),

    .bdrv_close             = bdrv_test_top_close,
    .bdrv_co_preadv         = bdrv_test_top_co_preadv,

    .bdrv_child_perm        = bdrv_format_default_perms,
};
1129
/* Arguments for the coroutine that deletes/detaches the node during drain */
typedef struct TestCoDeleteByDrainData {
    BlockBackend *blk;
    bool detach_instead_of_delete;  /* detach children rather than unref blk */
    bool done;                      /* set when the coroutine has finished */
} TestCoDeleteByDrainData;

/*
 * Issues an internal read through the top node's wait_child and then drops
 * the last reference (or detaches all children), so that the node goes away
 * while a drain triggered elsewhere is waiting for the request.
 */
static void coroutine_fn test_co_delete_by_drain(void *opaque)
{
    TestCoDeleteByDrainData *dbdd = opaque;
    BlockBackend *blk = dbdd->blk;
    BlockDriverState *bs = blk_bs(blk);
    BDRVTestTopState *tts = bs->opaque;
    void *buffer = g_malloc(65536);
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);

    /* Pretend some internal write operation from parent to child.
     * Important: read from the child, not through the parent (blk)!
     * Draining propagates up to the root first and then waits for requests
     * from root to leaves; a request on the root would already be drained
     * before the walk back down.  We want to be in the middle of draining
     * when this request returns. */
    bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);

    g_assert_cmpint(bs->refcnt, ==, 1);

    if (!dbdd->detach_instead_of_delete) {
        /* Drop the last reference; this deletes the whole tree */
        blk_unref(blk);
    } else {
        /* Keep the node, but detach all of its children */
        BdrvChild *c, *next_c;
        QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
            bdrv_unref_child(bs, c);
        }
    }

    dbdd->done = true;
    g_free(buffer);
}
1169
1170
1171
1172
1173
1174
1175
1176
/*
 * Build a graph of BlockBackend -> "top" node -> three children (two
 * null-co nodes around one bdrv_test "wait" child), start
 * test_co_delete_by_drain() in a coroutine so a read is pending on the
 * wait child, then drain with the requested drain_type.  The drain must
 * let the read finish so the coroutine can delete/detach the node(s).
 */
static void do_test_delete_by_drain(bool detach_instead_of_delete,
                                    enum drain_type drain_type)
{
    BlockBackend *blk;
    BlockDriverState *bs, *child_bs, *null_bs;
    BDRVTestTopState *tts;
    TestCoDeleteByDrainData dbdd;
    Coroutine *co;

    bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR,
                              &error_abort);
    bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
    tts = bs->opaque;

    null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                        &error_abort);
    bdrv_attach_child(bs, null_bs, "null-child", &child_file, &error_abort);

    /* This is the child the coroutine's read will be stuck in (its driver
     * sleeps in preadv) */
    child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR,
                                    &error_abort);
    child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;

    tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child", &child_file,
                                        &error_abort);

    /* A second null-co child so the wait child sits in the middle of the
     * child list */
    null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                        &error_abort);
    bdrv_attach_child(bs, null_bs, "null-child", &child_file, &error_abort);

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk, bs, &error_abort);

    /* blk_insert_bs() took its own reference; drop ours */
    bdrv_unref(bs);

    g_assert_cmpint(bs->refcnt, ==, 1);
    g_assert_cmpint(child_bs->refcnt, ==, 1);
    g_assert_cmpint(null_bs->refcnt, ==, 1);

    /* Kick off the coroutine; its read on the wait child stays pending */
    dbdd = (TestCoDeleteByDrainData){
        .blk = blk,
        .detach_instead_of_delete = detach_instead_of_delete,
        .done = false,
    };
    co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd);
    qemu_coroutine_enter(co);

    /*
     * Drain while the read is still in flight; this is what allows the
     * coroutine to finish and perform the delete/detach.
     */
    switch (drain_type) {
    case BDRV_DRAIN:
        /* Keep child_bs alive across the drain — the coroutine may drop
         * the last reference to it while we are still inside bdrv_drain() */
        bdrv_ref(child_bs);
        bdrv_drain(child_bs);
        bdrv_unref(child_bs);
        break;
    case BDRV_SUBTREE_DRAIN:
        /*
         * NOTE(review): only exercised in detach mode — presumably the
         * subtree drain on bs itself cannot be ended safely once bs has
         * been deleted; confirm before extending this case.
         */
        assert(detach_instead_of_delete);
        bdrv_subtree_drained_begin(bs);
        bdrv_subtree_drained_end(bs);
        break;
    case BDRV_DRAIN_ALL:
        bdrv_drain_all_begin();
        bdrv_drain_all_end();
        break;
    default:
        g_assert_not_reached();
    }

    /* Let any deferred work run until the coroutine reports completion */
    while (!dbdd.done) {
        aio_poll(qemu_get_aio_context(), true);
    }

    if (detach_instead_of_delete) {
        /* Only the children were detached; the backend (and bs) still
         * hold references that must be dropped here */
        blk_unref(blk);
    }
}
1266
/* Delete the node graph while it is being drained with bdrv_drain(). */
static void test_delete_by_drain(void)
{
    do_test_delete_by_drain(false, BDRV_DRAIN);
}
1271
/* Detach all children while draining with bdrv_drain_all(). */
static void test_detach_by_drain_all(void)
{
    do_test_delete_by_drain(true, BDRV_DRAIN_ALL);
}
1276
/* Detach all children while draining with bdrv_drain(). */
static void test_detach_by_drain(void)
{
    do_test_delete_by_drain(true, BDRV_DRAIN);
}
1281
/* Detach all children while draining with bdrv_subtree_drained_begin/end(). */
static void test_detach_by_drain_subtree(void)
{
    do_test_delete_by_drain(true, BDRV_SUBTREE_DRAIN);
}
1286
1287
/*
 * Shared state for the test_detach_indirect() variants: which child of
 * parent_b to detach (child_b) and which node to attach in its place (c).
 * child_c receives the BdrvChild created for c during the drain.
 */
struct detach_by_parent_data {
    BlockDriverState *parent_b;
    BdrvChild *child_b;
    BlockDriverState *c;
    BdrvChild *child_c;
    bool by_parent_cb;  /* true: detach from the AIO cb; false: from a BH
                         * scheduled by the drained_begin override */
};
/* Global because detach_by_parent_aio_cb() is registered with a NULL opaque
 * and the drained_begin override has no test-controlled argument either. */
static struct detach_by_parent_data detach_by_parent_data;
1296
/*
 * Perform the graph change: detach child_b from parent_b, then attach node
 * c in its place as "PB-C".  The bdrv_ref() balances the reference that
 * attaching consumes (same ref-before-attach pattern as in
 * test_detach_indirect()).
 */
static void detach_indirect_bh(void *opaque)
{
    struct detach_by_parent_data *data = opaque;

    bdrv_unref_child(data->parent_b, data->child_b);

    bdrv_ref(data->c);
    data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
                                      &child_file, &error_abort);
}
1307
1308static void detach_by_parent_aio_cb(void *opaque, int ret)
1309{
1310 struct detach_by_parent_data *data = &detach_by_parent_data;
1311
1312 g_assert_cmpint(ret, ==, 0);
1313 if (data->by_parent_cb) {
1314 detach_indirect_bh(data);
1315 }
1316}
1317
/*
 * drained_begin override installed in detach_by_driver_cb_role: schedules
 * the graph change as a BH — NOTE(review): presumably because mutating the
 * graph inside the drained_begin callback itself is not allowed; confirm —
 * and then chains to the original child_file implementation.
 */
static void detach_by_driver_cb_drained_begin(BdrvChild *child)
{
    aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
                            detach_indirect_bh, &detach_by_parent_data);
    child_file.drained_begin(child);
}
1324
/* Filled in by test_detach_indirect(false): a copy of child_file with
 * drained_begin replaced by detach_by_driver_cb_drained_begin(). */
static BdrvChildRole detach_by_driver_cb_role;
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
/*
 * Initial graph built below:
 *
 *    BlockBackend
 *         |
 *      parent-a      parent-b
 *          \         /      \
 *          PA-A   PB-A     PB-B
 *             \    /          |
 *               a             b
 *
 * While parent-b is subtree-drained, child_b (PB-B) is removed and node c
 * is attached in its place (PB-C).  The detach is triggered either from
 * the AIO callback of a request on the BlockBackend (by_parent_cb == true)
 * or from the drained_begin override installed in
 * detach_by_driver_cb_role, which runs it via a scheduled BH
 * (by_parent_cb == false).
 */
static void test_detach_indirect(bool by_parent_cb)
{
    BlockBackend *blk;
    BlockDriverState *parent_a, *parent_b, *a, *b, *c;
    BdrvChild *child_a, *child_b;
    BlockAIOCB *acb;

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    if (!by_parent_cb) {
        /* Clone child_file and patch in our drained_begin override */
        detach_by_driver_cb_role = child_file;
        detach_by_driver_cb_role.drained_begin =
            detach_by_driver_cb_drained_begin;
    }

    /* Create all five nodes */
    parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR,
                                    &error_abort);
    parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0,
                                    &error_abort);

    a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort);
    b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort);
    c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort);

    /* blk takes over our reference to parent_a */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk, parent_a, &error_abort);
    bdrv_unref(parent_a);

    /* In driver-cb mode, make parent_a's drain_begin sleep so the BH runs
     * while the drain is still in progress */
    if (!by_parent_cb) {
        BDRVTestState *s = parent_a->opaque;
        s->sleep_in_drain_begin = true;
    }

    /* Set up the other three edges of the graph (ref balances attach) */
    bdrv_ref(b);
    bdrv_ref(a);
    child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_file, &error_abort);
    child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_backing, &error_abort);

    bdrv_ref(a);
    bdrv_attach_child(parent_a, a, "PA-A",
                      by_parent_cb ? &child_file : &detach_by_driver_cb_role,
                      &error_abort);

    g_assert_cmpint(parent_a->refcnt, ==, 1);
    g_assert_cmpint(parent_b->refcnt, ==, 1);
    g_assert_cmpint(a->refcnt, ==, 3);
    g_assert_cmpint(b->refcnt, ==, 2);
    g_assert_cmpint(c->refcnt, ==, 1);

    g_assert(QLIST_FIRST(&parent_b->children) == child_a);
    g_assert(QLIST_NEXT(child_a, next) == child_b);
    g_assert(QLIST_NEXT(child_b, next) == NULL);

    /* Start the request that (in by_parent_cb mode) triggers the detach */
    detach_by_parent_data = (struct detach_by_parent_data) {
        .parent_b = parent_b,
        .child_b = child_b,
        .c = c,
        .by_parent_cb = by_parent_cb,
    };
    acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL);
    g_assert(acb != NULL);

    /* Drain; during this the graph change must have happened */
    bdrv_subtree_drained_begin(parent_b);

    g_assert(detach_by_parent_data.child_c != NULL);

    /* b lost its parent (detached), c gained one (attached as PB-C) */
    g_assert_cmpint(parent_a->refcnt, ==, 1);
    g_assert_cmpint(parent_b->refcnt, ==, 1);
    g_assert_cmpint(a->refcnt, ==, 3);
    g_assert_cmpint(b->refcnt, ==, 1);
    g_assert_cmpint(c->refcnt, ==, 2);

    g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c);
    g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a);
    g_assert(QLIST_NEXT(child_a, next) == NULL);

    /* b is no longer in the drained subtree; c, attached during the drain,
     * must have been quiesced on attach */
    g_assert_cmpint(parent_a->quiesce_counter, ==, 1);
    g_assert_cmpint(parent_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a->quiesce_counter, ==, 1);
    g_assert_cmpint(b->quiesce_counter, ==, 0);
    g_assert_cmpint(c->quiesce_counter, ==, 1);

    bdrv_subtree_drained_end(parent_b);

    bdrv_unref(parent_b);
    blk_unref(blk);

    g_assert_cmpint(a->refcnt, ==, 1);
    g_assert_cmpint(b->refcnt, ==, 1);
    g_assert_cmpint(c->refcnt, ==, 1);
    bdrv_unref(a);
    bdrv_unref(b);
    bdrv_unref(c);
}
1448
/* Graph change triggered from the parent's AIO completion callback. */
static void test_detach_by_parent_cb(void)
{
    test_detach_indirect(true);
}
1453
/* Graph change triggered from a child role's drained_begin callback. */
static void test_detach_by_driver_cb(void)
{
    test_detach_indirect(false);
}
1458
/*
 * Check that a node (overlay) appended on top of an already-drained node
 * (base) inherits the drained state, and that both nodes leave the drained
 * section together when the drain ends.
 */
static void test_append_to_drained(void)
{
    BlockBackend *blk;
    BlockDriverState *base, *overlay;
    BDRVTestState *base_s, *overlay_s;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    base = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    base_s = base->opaque;
    blk_insert_bs(blk, base, &error_abort);

    overlay = bdrv_new_open_driver(&bdrv_test, "overlay", BDRV_O_RDWR,
                                   &error_abort);
    overlay_s = overlay->opaque;

    /* Drain base first, with no requests in flight */
    do_drain_begin(BDRV_DRAIN, base);
    g_assert_cmpint(base->quiesce_counter, ==, 1);
    g_assert_cmpint(base_s->drain_count, ==, 1);
    g_assert_cmpint(base->in_flight, ==, 0);

    /* Appending while drained must not generate in-flight requests and
     * must propagate the drained state to the new overlay */
    bdrv_append(overlay, base, &error_abort);
    g_assert_cmpint(base->in_flight, ==, 0);
    g_assert_cmpint(overlay->in_flight, ==, 0);

    g_assert_cmpint(base->quiesce_counter, ==, 1);
    g_assert_cmpint(base_s->drain_count, ==, 1);
    g_assert_cmpint(overlay->quiesce_counter, ==, 1);
    g_assert_cmpint(overlay_s->drain_count, ==, 1);

    /* Ending the drain on base must unquiesce the overlay as well */
    do_drain_end(BDRV_DRAIN, base);

    g_assert_cmpint(base->quiesce_counter, ==, 0);
    g_assert_cmpint(base_s->drain_count, ==, 0);
    g_assert_cmpint(overlay->quiesce_counter, ==, 0);
    g_assert_cmpint(overlay_s->drain_count, ==, 0);

    bdrv_unref(base);
    blk_unref(blk);
}
1499
/*
 * Move a drained node between the main AioContext and two iothreads.
 * NOTE(review): the acquire/release pattern follows the apparent locking
 * contract of bdrv_try_set_aio_context() — the node's current context lock
 * is held except when operating from the main/home context; confirm
 * against the implementation in block.c before relying on it.
 */
static void test_set_aio_context(void)
{
    BlockDriverState *bs;
    IOThread *a = iothread_new();
    IOThread *b = iothread_new();
    AioContext *ctx_a = iothread_get_aio_context(a);
    AioContext *ctx_b = iothread_get_aio_context(b);

    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);

    /* main context -> iothread a */
    bdrv_drained_begin(bs);
    bdrv_try_set_aio_context(bs, ctx_a, &error_abort);

    aio_context_acquire(ctx_a);
    bdrv_drained_end(bs);

    /* iothread a -> iothread b -> back to the main context */
    bdrv_drained_begin(bs);
    bdrv_try_set_aio_context(bs, ctx_b, &error_abort);
    aio_context_release(ctx_a);
    aio_context_acquire(ctx_b);
    bdrv_try_set_aio_context(bs, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx_b);
    bdrv_drained_end(bs);

    bdrv_unref(bs);
    iothread_join(a);
    iothread_join(b);
}
1529
1530
/* Block job that idles until should_complete is set; its .commit callback
 * drops the backing files of the job's node and of detach_also. */
typedef struct TestDropBackingBlockJob {
    BlockJob common;
    bool should_complete;            /* set by the test to let run() return */
    bool *did_complete;              /* set to true by the commit callback */
    BlockDriverState *detach_also;   /* extra node whose backing is dropped */
} TestDropBackingBlockJob;
1537
1538static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp)
1539{
1540 TestDropBackingBlockJob *s =
1541 container_of(job, TestDropBackingBlockJob, common.job);
1542
1543 while (!s->should_complete) {
1544 job_sleep_ns(job, 0);
1545 }
1546
1547 return 0;
1548}
1549
/*
 * .commit callback: drop the backing file of the job's own node and of the
 * extra node stored in detach_also, then report completion through
 * *did_complete so the test can observe when this ran.
 */
static void test_drop_backing_job_commit(Job *job)
{
    TestDropBackingBlockJob *s =
        container_of(job, TestDropBackingBlockJob, common.job);

    bdrv_set_backing_hd(blk_bs(s->common.blk), NULL, &error_abort);
    bdrv_set_backing_hd(s->detach_also, NULL, &error_abort);

    *s->did_complete = true;
}
1560
/* Driver for the TestDropBackingBlockJob used by
 * test_blockjob_commit_by_drained_end(). */
static const BlockJobDriver test_drop_backing_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestDropBackingBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .drain          = block_job_drain,
        .run            = test_drop_backing_job_run,
        .commit         = test_drop_backing_job_commit,
    }
};
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
/*
 * Create three parent nodes over a single child, run a
 * test_drop_backing_job on parent 2 (whose commit also detaches parent 0's
 * backing file), and check that the job completes only after the child's
 * drained section ends — not while the child is still drained.
 */
static void test_blockjob_commit_by_drained_end(void)
{
    BlockDriverState *bs_child, *bs_parents[3];
    TestDropBackingBlockJob *job;
    bool job_has_completed = false;
    int i;

    bs_child = bdrv_new_open_driver(&bdrv_test, "child-node", BDRV_O_RDWR,
                                    &error_abort);

    /* Three parents, each with bs_child as its backing file */
    for (i = 0; i < 3; i++) {
        char name[32];
        snprintf(name, sizeof(name), "parent-node-%i", i);
        bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR,
                                             &error_abort);
        bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort);
    }

    job = block_job_create("job", &test_drop_backing_job_driver, NULL,
                           bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL,
                           &error_abort);

    job->detach_also = bs_parents[0];
    job->did_complete = &job_has_completed;

    job_start(&job->common.job);

    /* Allow the job to finish, but it must not do so while drained */
    job->should_complete = true;
    bdrv_drained_begin(bs_child);
    g_assert(!job_has_completed);
    bdrv_drained_end(bs_child);
    g_assert(job_has_completed);

    bdrv_unref(bs_parents[0]);
    bdrv_unref(bs_parents[1]);
    bdrv_unref(bs_parents[2]);
    bdrv_unref(bs_child);
}
1673
/* Entry point: initialize the block layer and main loop, register every
 * drain test case with GLib's test framework, and run them. */
int main(int argc, char **argv)
{
    int ret;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);
    qemu_event_init(&done_event, false);

    /* Driver callback invocation under each drain type */
    g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
    g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
    g_test_add_func("/bdrv-drain/driver-cb/drain_subtree",
                    test_drv_cb_drain_subtree);

    g_test_add_func("/bdrv-drain/driver-cb/co/drain_all",
                    test_drv_cb_co_drain_all);
    g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain);
    g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree",
                    test_drv_cb_co_drain_subtree);

    /* Quiesce-counter behavior under each drain type */
    g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
    g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
    g_test_add_func("/bdrv-drain/quiesce/drain_subtree",
                    test_quiesce_drain_subtree);

    g_test_add_func("/bdrv-drain/quiesce/co/drain_all",
                    test_quiesce_co_drain_all);
    g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain);
    g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree",
                    test_quiesce_co_drain_subtree);

    g_test_add_func("/bdrv-drain/nested", test_nested);
    g_test_add_func("/bdrv-drain/multiparent", test_multiparent);

    /* Graph changes happening inside a drained section */
    g_test_add_func("/bdrv-drain/graph-change/drain_subtree",
                    test_graph_change_drain_subtree);
    g_test_add_func("/bdrv-drain/graph-change/drain_all",
                    test_graph_change_drain_all);

    /* Drain of nodes living in iothread AioContexts */
    g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all);
    g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain);
    g_test_add_func("/bdrv-drain/iothread/drain_subtree",
                    test_iothread_drain_subtree);

    /* Block job pause/resume around drained sections */
    g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
    g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
    g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
                    test_blockjob_drain_subtree);

    g_test_add_func("/bdrv-drain/blockjob/error/drain_all",
                    test_blockjob_error_drain_all);
    g_test_add_func("/bdrv-drain/blockjob/error/drain",
                    test_blockjob_error_drain);
    g_test_add_func("/bdrv-drain/blockjob/error/drain_subtree",
                    test_blockjob_error_drain_subtree);

    g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
                    test_blockjob_iothread_drain_all);
    g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
                    test_blockjob_iothread_drain);
    g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree",
                    test_blockjob_iothread_drain_subtree);

    g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all",
                    test_blockjob_iothread_error_drain_all);
    g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain",
                    test_blockjob_iothread_error_drain);
    g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_subtree",
                    test_blockjob_iothread_error_drain_subtree);

    /* Node deletion/detach triggered from within a drained section */
    g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
    g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
    g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
    g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree);
    g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb);
    g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb);

    g_test_add_func("/bdrv-drain/attach/drain", test_append_to_drained);

    g_test_add_func("/bdrv-drain/set_aio_context", test_set_aio_context);

    g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end",
                    test_blockjob_commit_by_drained_end);

    ret = g_test_run();
    qemu_event_destroy(&done_event);
    return ret;
}
1764