qemu/tests/unit/test-block-iothread.c
/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qemu/main-loop.h"
#include "iothread.h"

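/*
 * Dummy test driver: all callbacks succeed immediately without doing any
 * real I/O, which is all the AioContext switching tests below need.
 */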
static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs,
                                          uint64_t offset, uint64_t bytes,
                                          QEMUIOVector *qiov, int flags)
{
    return 0;
}

static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int bytes)
{
    return 0;
}

static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
                      PreallocMode prealloc, BdrvRequestFlags flags,
                      Error **errp)
{
    return 0;
}

static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}

static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = 1,

    .bdrv_co_preadv         = bdrv_test_co_prwv,
    .bdrv_co_pwritev        = bdrv_test_co_prwv,
    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
    .bdrv_co_truncate       = bdrv_test_co_truncate,
    .bdrv_co_block_status   = bdrv_test_co_block_status,
};

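/*
 * The following helpers each exercise one synchronous block layer operation
 * on a BdrvChild (and, where a *_blk_* variant exists, on the BlockBackend).
 * They are invoked by test_sync_op() while the node lives in an iothread.
 */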
static void test_sync_op_pread(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pread(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pread(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_pwrite(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Success */
    ret = bdrv_pwrite(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pwrite(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pread(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pread(blk, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pread(blk, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pwrite(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Success */
    ret = blk_pwrite(blk, 0, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pwrite(blk, -2, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_load_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_save_vmstate(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pdiscard(BlockBackend *blk)
{
    int ret;

    /* Early success: UNMAP not supported */
    ret = blk_pdiscard(blk, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pdiscard(blk, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image */
    c->bs->read_only = true;
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    c->bs->read_only = false;
    c->bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_block_status(BdrvChild *c)
{
    int ret;
    int64_t n;

    /* Normal success path */
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: No driver support */
    bdrv_test.bdrv_co_block_status = NULL;
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 1);

    /* Early success: bytes = 0 */
    ret = bdrv_is_allocated(c->bs, 0, 0, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Offset > image size */
    ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
    g_assert_cmpint(ret, ==, 0);
}

static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->read_only = true;
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    c->bs->read_only = false;
    c->bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->read_only = true;
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    bs->read_only = false;
    bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_check(BdrvChild *c)
{
    BdrvCheckResult result;
    int ret;

    /* Error: Driver does not implement check */
    ret = bdrv_check(c->bs, &result, 0);
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_invalidate_cache(BdrvChild *c)
{
    /* Early success: Image is not inactive */
    bdrv_invalidate_cache(c->bs, NULL);
}

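/* A sync-op test is a BdrvChild-level test function plus an optional
 * BlockBackend-level variant (blkfn). */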
typedef struct SyncOpTest {
    const char *name;
    void (*fn)(BdrvChild *c);
    void (*blkfn)(BlockBackend *blk);
} SyncOpTest;

const SyncOpTest sync_op_tests[] = {
    {
        .name   = "/sync-op/pread",
        .fn     = test_sync_op_pread,
        .blkfn  = test_sync_op_blk_pread,
    }, {
        .name   = "/sync-op/pwrite",
        .fn     = test_sync_op_pwrite,
        .blkfn  = test_sync_op_blk_pwrite,
    }, {
        .name   = "/sync-op/load_vmstate",
        .fn     = test_sync_op_load_vmstate,
    }, {
        .name   = "/sync-op/save_vmstate",
        .fn     = test_sync_op_save_vmstate,
    }, {
        .name   = "/sync-op/pdiscard",
        .fn     = test_sync_op_pdiscard,
        .blkfn  = test_sync_op_blk_pdiscard,
    }, {
        .name   = "/sync-op/truncate",
        .fn     = test_sync_op_truncate,
    }, {
        .name   = "/sync-op/block_status",
        .fn     = test_sync_op_block_status,
    }, {
        .name   = "/sync-op/flush",
        .fn     = test_sync_op_flush,
        .blkfn  = test_sync_op_blk_flush,
    }, {
        .name   = "/sync-op/check",
        .fn     = test_sync_op_check,
    }, {
        .name   = "/sync-op/invalidate_cache",
        .fn     = test_sync_op_invalidate_cache,
    },
};

/* Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);
    c = QLIST_FIRST(&bs->parents);

    blk_set_aio_context(blk, ctx, &error_abort);
    aio_context_acquire(ctx);
    t->fn(c);
    if (t->blkfn) {
        t->blkfn(blk);
    }
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}

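/*
 * Minimal block job for the attach test: it just counts loop iterations in n,
 * so the test can verify that the job keeps making progress after each
 * AioContext switch.
 */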
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;
    int n;
} TestBlockJob;

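/* .prepare() is expected to run in the main context even while the job itself
 * runs in an iothread; the assertion checks exactly that. */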
static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}

static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}

static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}

BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .run            = test_job_run,
        .complete       = test_job_complete,
        .prepare        = test_job_prepare,
    },
};

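/*
 * Test that a block job follows its node when the BlockBackend is moved
 * between the main loop and an iothread AioContext, and that it keeps
 * making progress after each switch.
 */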
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    aio_context_acquire(ctx);
    job_complete_sync(&tjob->common.job, &error_abort);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}

/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *  blk
 *   |
 *   |  bs_verify [blkverify]
 *   |   /               \
 *   |  /                 \
 *  bs_a [bdrv_test]    bs_b [bdrv_test]
 *
 */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /*
     * Create bs_a and its BlockBackend.  We cannot take the RESIZE
     * permission because blkverify will not share it on the test
     * image.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}

/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *              blk
 *               |
 *      bs_verify [blkverify]
 *       /              \
 *      /                \
 *   bs_b [raw]         bs_c [raw]
 *      \                /
 *       \              /
 *       bs_a [bdrv_test]
 */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    /*
     * Do not take the RESIZE permission: This would require the same
     * from bs_c and thus from bs_a; however, blkverify will not share
     * it on bs_b, and thus it will not be available for bs_a.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_c) == main_ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}

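/*
 * Test AioContext propagation through a mirror job: src, target, the
 * "filter_node" filter created by the job and the job itself must switch
 * together, and a parent that does not allow AioContext changes must make
 * the switch fail until it is explicitly allowed.
 */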
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job;
    Error *local_err = NULL;

    /* Create src and target */
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);
    job = job_get("job0");
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src */
    bdrv_try_set_aio_context(src, ctx, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target */
    aio_context_acquire(ctx);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);

    bdrv_try_set_aio_context(target, ctx, &local_err);
    error_free_or_abort(&local_err);

    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    aio_context_acquire(ctx);
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_set_aio_context(target, ctx, &error_abort);
    aio_context_release(ctx);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    job_cancel_sync_all();

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}

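/*
 * Test that a node opened on top of a tree that already lives in an iothread
 * inherits that AioContext, and that it follows the tree back to the main
 * context afterwards.
 */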
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");

    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}

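/*
 * Test that detaching a node from a BlockBackend leaves the BlockBackend's
 * AioContext untouched while the node itself returns to the main context,
 * and that re-attaching moves the node back into the iothread.
 */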
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Remove the node again */
    aio_context_acquire(ctx);
    blk_remove_bs(blk);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);
    bdrv_unref(bs);
    blk_unref(blk);
}

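/* Register one GTest case per entry in sync_op_tests plus the attach and
 * propagate tests, then run them under the main loop. */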
int main(int argc, char **argv)
{
    int i;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
        const SyncOpTest *t = &sync_op_tests[i];
        g_test_add_data_func(t->name, t, test_sync_op);
    }

    g_test_add_func("/attach/blockjob", test_attach_blockjob);
    g_test_add_func("/attach/second_node", test_attach_second_node);
    g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
    g_test_add_func("/propagate/basic", test_propagate_basic);
    g_test_add_func("/propagate/diamond", test_propagate_diamond);
    g_test_add_func("/propagate/mirror", test_propagate_mirror);

    return g_test_run();
}