qemu/tests/unit/test-blockjob.c
/*
 * Blockjob tests
 *
 * Copyright Igalia, S.L. 2016
 *
 * Authors:
 *  Alberto Garcia   <berto@igalia.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "iothread.h"

static const BlockJobDriver test_block_job_driver = {
    .job_driver = {
        .instance_size = sizeof(BlockJob),
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
    },
};

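/* Empty completion callback: the tests do not inspect the job's return value */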
static void block_job_cb(void *opaque, int ret)
{
}

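/*
 * Create a block job with the given ID and driver on @blk's
 * BlockDriverState and check that creation succeeds or fails as
 * expected.  On success, also verify that the job's ID matches @id,
 * or the backend's name when @id is NULL.
 */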
static BlockJob *mk_job(BlockBackend *blk, const char *id,
                        const BlockJobDriver *drv, bool should_succeed,
                        int flags)
{
    BlockJob *job;
    Error *err = NULL;

    job = block_job_create(id, drv, NULL, blk_bs(blk),
                           0, BLK_PERM_ALL, 0, flags, block_job_cb,
                           NULL, &err);
    if (should_succeed) {
        g_assert_null(err);
        g_assert_nonnull(job);
        if (id) {
            g_assert_cmpstr(job->job.id, ==, id);
        } else {
            g_assert_cmpstr(job->job.id, ==, blk_name(blk));
        }
    } else {
        error_free_or_abort(&err);
        g_assert_null(job);
    }

    return job;
}

static BlockJob *do_test_id(BlockBackend *blk, const char *id,
                            bool should_succeed)
{
    return mk_job(blk, id, &test_block_job_driver,
                  should_succeed, JOB_DEFAULT);
}

/* This creates a BlockBackend (optionally with a name) with a
 * BlockDriverState inserted. */
static BlockBackend *create_blk(const char *name)
{
    /* No I/O is performed on this device */
    BlockBackend *blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    BlockDriverState *bs;

    QDict *opt = qdict_new();
    qdict_put_str(opt, "file.read-zeroes", "on");
    bs = bdrv_open("null-co://", NULL, opt, 0, &error_abort);
    g_assert_nonnull(bs);

    blk_insert_bs(blk, bs, &error_abort);
    bdrv_unref(bs);

    if (name) {
        Error *err = NULL;
        monitor_add_blk(blk, name, &err);
        g_assert_null(err);
    }

    return blk;
}

/* This destroys the backend */
static void destroy_blk(BlockBackend *blk)
{
    if (blk_name(blk)[0] != '\0') {
        monitor_remove_blk(blk);
    }

    blk_remove_bs(blk);
    blk_unref(blk);
}

static void test_job_ids(void)
{
    BlockBackend *blk[3];
    BlockJob *job[3];

    blk[0] = create_blk(NULL);
    blk[1] = create_blk("drive1");
    blk[2] = create_blk("drive2");

    /* No job ID provided and the block backend has no name */
    job[0] = do_test_id(blk[0], NULL, false);

    /* These are all invalid job IDs */
    job[0] = do_test_id(blk[0], "0id", false);
    job[0] = do_test_id(blk[0], "",    false);
    job[0] = do_test_id(blk[0], "   ", false);
    job[0] = do_test_id(blk[0], "123", false);
    job[0] = do_test_id(blk[0], "_id", false);
    job[0] = do_test_id(blk[0], "-id", false);
    job[0] = do_test_id(blk[0], ".id", false);
    job[0] = do_test_id(blk[0], "#id", false);

    /* This one is valid */
    job[0] = do_test_id(blk[0], "id0", true);

    /* We can have two jobs in the same BDS */
    job[1] = do_test_id(blk[0], "id1", true);
    job_early_fail(&job[1]->job);

    /* Duplicate job IDs are not allowed */
    job[1] = do_test_id(blk[1], "id0", false);

    /* But once job[0] finishes we can reuse its ID */
    job_early_fail(&job[0]->job);
    job[1] = do_test_id(blk[1], "id0", true);

    /* No job ID specified, defaults to the backend name ('drive1') */
    job_early_fail(&job[1]->job);
    job[1] = do_test_id(blk[1], NULL, true);

    /* Duplicate job ID */
    job[2] = do_test_id(blk[2], "drive1", false);

    /* The ID of job[2] would default to 'drive2' but it is already in use */
    job[0] = do_test_id(blk[0], "drive2", true);
    job[2] = do_test_id(blk[2], NULL, false);

    /* This one is valid */
    job[2] = do_test_id(blk[2], "id_2", true);

    job_early_fail(&job[0]->job);
    job_early_fail(&job[1]->job);
    job_early_fail(&job[2]->job);

    destroy_blk(blk[0]);
    destroy_blk(blk[1]);
    destroy_blk(blk[2]);
}

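/*
 * CancelJob: a job whose run function loops until should_complete is
 * set, transitioning to READY once should_converge is set.  The
 * cancellation tests below use it to reach each job state in turn.
 */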
typedef struct CancelJob {
    BlockJob common;
    BlockBackend *blk;
    bool should_converge;
    bool should_complete;
} CancelJob;

static void cancel_job_complete(Job *job, Error **errp)
{
    CancelJob *s = container_of(job, CancelJob, common.job);
    s->should_complete = true;
}

static int coroutine_fn cancel_job_run(Job *job, Error **errp)
{
    CancelJob *s = container_of(job, CancelJob, common.job);

    while (!s->should_complete) {
        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        if (!job_is_ready(&s->common.job) && s->should_converge) {
            job_transition_to_ready(&s->common.job);
        }

        job_sleep_ns(&s->common.job, 100000);
    }

    return 0;
}

static const BlockJobDriver test_cancel_driver = {
    .job_driver = {
        .instance_size = sizeof(CancelJob),
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
        .run           = cancel_job_run,
        .complete      = cancel_job_complete,
    },
};

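/*
 * Create a CancelJob on an anonymous backend and return it through
 * @pjob, with an extra reference held (dropped again in cancel_common()).
 */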
static CancelJob *create_common(Job **pjob)
{
    BlockBackend *blk;
    Job *job;
    BlockJob *bjob;
    CancelJob *s;

    blk = create_blk(NULL);
    bjob = mk_job(blk, "Steve", &test_cancel_driver, true,
                  JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
    job = &bjob->job;
    job_ref(job);
    assert(job->status == JOB_STATUS_CREATED);
    s = container_of(bjob, CancelJob, common);
    s->blk = blk;

    *pjob = job;
    return s;
}

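/*
 * Force-cancel the job; unless it was still freshly created or already
 * concluded, dismiss it manually.  Afterwards the job must have reached
 * the NULL state, so drop our reference and tear down the backend.
 */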
static void cancel_common(CancelJob *s)
{
    BlockJob *job = &s->common;
    BlockBackend *blk = s->blk;
    JobStatus sts = job->job.status;
    AioContext *ctx;

    ctx = job->job.aio_context;
    aio_context_acquire(ctx);

    job_cancel_sync(&job->job, true);
    if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
        Job *dummy = &job->job;
        job_dismiss(&dummy, &error_abort);
    }
    assert(job->job.status == JOB_STATUS_NULL);
    job_unref(&job->job);
    destroy_blk(blk);

    aio_context_release(ctx);
}

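/* Cancel a job that was created but never started */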
static void test_cancel_created(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);
    cancel_common(s);
}

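/* Cancel a job while it is in the RUNNING state */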
static void test_cancel_running(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    cancel_common(s);
}

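/* Cancel a job that the user has paused (PAUSED state) */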
static void test_cancel_paused(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    job_user_pause(job, &error_abort);
    job_enter(job);
    assert(job->status == JOB_STATUS_PAUSED);

    cancel_common(s);
}

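/* Cancel a job that has transitioned to the READY state */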
static void test_cancel_ready(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    assert(job->status == JOB_STATUS_READY);

    cancel_common(s);
}

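/* Cancel a READY job that the user has paused (STANDBY state) */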
static void test_cancel_standby(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    assert(job->status == JOB_STATUS_READY);

    job_user_pause(job, &error_abort);
    job_enter(job);
    assert(job->status == JOB_STATUS_STANDBY);

    cancel_common(s);
}

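/*
 * Cancel a job that has finished its work and is waiting to be
 * finalized (PENDING state)
 */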
static void test_cancel_pending(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    assert(job->status == JOB_STATUS_READY);

    job_complete(job, &error_abort);
    job_enter(job);
    while (!job->deferred_to_main_loop) {
        aio_poll(qemu_get_aio_context(), true);
    }
    assert(job->status == JOB_STATUS_READY);
    aio_poll(qemu_get_aio_context(), true);
    assert(job->status == JOB_STATUS_PENDING);

    cancel_common(s);
}

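/*
 * Cancel a job that has already been finalized (CONCLUDED state) but
 * not yet dismissed
 */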
static void test_cancel_concluded(void)
{
    Job *job;
    CancelJob *s;

    s = create_common(&job);

    job_start(job);
    assert(job->status == JOB_STATUS_RUNNING);

    s->should_converge = true;
    job_enter(job);
    assert(job->status == JOB_STATUS_READY);

    job_complete(job, &error_abort);
    job_enter(job);
    while (!job->deferred_to_main_loop) {
        aio_poll(qemu_get_aio_context(), true);
    }
    assert(job->status == JOB_STATUS_READY);
    aio_poll(qemu_get_aio_context(), true);
    assert(job->status == JOB_STATUS_PENDING);

    aio_context_acquire(job->aio_context);
    job_finalize(job, &error_abort);
    aio_context_release(job->aio_context);
    assert(job->status == JOB_STATUS_CONCLUDED);

    cancel_common(s);
}

/* (See test_yielding_driver for the job description) */
typedef struct YieldingJob {
    BlockJob common;
    bool should_complete;
} YieldingJob;

static void yielding_job_complete(Job *job, Error **errp)
{
    YieldingJob *s = container_of(job, YieldingJob, common.job);
    s->should_complete = true;
    job_enter(job);
}

static int coroutine_fn yielding_job_run(Job *job, Error **errp)
{
    YieldingJob *s = container_of(job, YieldingJob, common.job);

    job_transition_to_ready(job);

    while (!s->should_complete) {
        job_yield(job);
    }

    return 0;
}

/*
 * This job transitions immediately to the READY state, and then
 * yields until it is asked to complete.
 */
static const BlockJobDriver test_yielding_driver = {
    .job_driver = {
        .instance_size  = sizeof(YieldingJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .run            = yielding_job_run,
        .complete       = yielding_job_complete,
    },
};

/*
 * Test that job_complete() works even on jobs that are in a paused
 * state (i.e., STANDBY).
 *
 * To do this, run YieldingJob in an IO thread, get it into the READY
 * state, then have a drained section.  Before ending the section,
 * acquire the context so the job will not be entered and will thus
 * remain on STANDBY.
 *
 * job_complete() should still work without error.
 *
 * Note that on the QMP interface, it is impossible to lock an IO
 * thread before a drained section ends.  In practice, the
 * bdrv_drain_all_end() and the aio_context_acquire() will be
 * reversed.  However, that makes for worse reproducibility here:
 * sometimes the job would then no longer be in STANDBY, but would
 * already be running again.  We cannot prevent that, because the IO
 * thread runs concurrently.  We can only prevent it by taking the
 * lock before ending the drained section, so we do that.
 *
 * (You can reverse the order of operations and most of the time the
 * test will pass, but sometimes the assert(status == STANDBY) will
 * fail.)
 */
static void test_complete_in_standby(void)
{
    BlockBackend *blk;
    IOThread *iothread;
    AioContext *ctx;
    Job *job;
    BlockJob *bjob;

    /* Create a test drive, move it to an IO thread */
    blk = create_blk(NULL);
    iothread = iothread_new();

    ctx = iothread_get_aio_context(iothread);
    blk_set_aio_context(blk, ctx, &error_abort);

    /* Create our test job */
    bjob = mk_job(blk, "job", &test_yielding_driver, true,
                  JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
    job = &bjob->job;
    assert(job->status == JOB_STATUS_CREATED);

    /* Wait for the job to become READY */
    job_start(job);
    aio_context_acquire(ctx);
    AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY);
    aio_context_release(ctx);

    /* Begin the drained section, pausing the job */
    bdrv_drain_all_begin();
    assert(job->status == JOB_STATUS_STANDBY);
    /* Lock the IO thread to prevent the job from being run */
    aio_context_acquire(ctx);
    /* This will schedule the job to resume it */
    bdrv_drain_all_end();

    /* But the job cannot run, so it will remain on standby */
    assert(job->status == JOB_STATUS_STANDBY);

    /* Even though the job is on standby, this should work */
    job_complete(job, &error_abort);

    /* The test is done now, clean up. */
    job_finish_sync(job, NULL, &error_abort);
    assert(job->status == JOB_STATUS_PENDING);

    job_finalize(job, &error_abort);
    assert(job->status == JOB_STATUS_CONCLUDED);

    job_dismiss(&job, &error_abort);

    destroy_blk(blk);
    aio_context_release(ctx);
    iothread_join(iothread);
}

int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_abort);
    bdrv_init();

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/blockjob/ids", test_job_ids);
    g_test_add_func("/blockjob/cancel/created", test_cancel_created);
    g_test_add_func("/blockjob/cancel/running", test_cancel_running);
    g_test_add_func("/blockjob/cancel/paused", test_cancel_paused);
    g_test_add_func("/blockjob/cancel/ready", test_cancel_ready);
    g_test_add_func("/blockjob/cancel/standby", test_cancel_standby);
    g_test_add_func("/blockjob/cancel/pending", test_cancel_pending);
    g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded);
    g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby);
    return g_test_run();
}