qemu/job.c
/*
 * Background jobs (long-running operations)
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012, 2018 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/job.h"
#include "qemu/id.h"
#include "qemu/main-loop.h"
#include "block/aio-wait.h"
#include "trace/trace-root.h"
#include "qapi/qapi-events-job.h"

static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);

/* Job State Transition Table */
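/* Indexed as JobSTT[current status][requested status]; a 1 marks a legal
 * transition, enforced by an assertion in job_state_transition(). */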
bool JobSTT[JOB_STATUS__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    /* U: */ [JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
    /* C: */ [JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1},
    /* R: */ [JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0},
    /* P: */ [JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
    /* Y: */ [JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0},
    /* S: */ [JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    /* W: */ [JOB_STATUS_WAITING]   = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0},
    /* D: */ [JOB_STATUS_PENDING]   = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* X: */ [JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* E: */ [JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
    /* N: */ [JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};

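/* Job Verb Permission Table: indexed as JobVerbTable[verb][current status];
 * a 1 means the verb is allowed in that status (checked by job_apply_verb()). */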
bool JobVerbTable[JOB_VERB__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    [JOB_VERB_CANCEL]               = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0},
    [JOB_VERB_PAUSE]                = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_RESUME]               = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_SET_SPEED]            = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_COMPLETE]             = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    [JOB_VERB_FINALIZE]             = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
    [JOB_VERB_DISMISS]              = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
};

/* Transactional group of jobs */
struct JobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, Job) jobs;

    /* Reference count */
    int refcnt;
};

/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to job_do_yield and
 * job_enter. */
static QemuMutex job_mutex;

static void job_lock(void)
{
    qemu_mutex_lock(&job_mutex);
}

static void job_unlock(void)
{
    qemu_mutex_unlock(&job_mutex);
}

static void __attribute__((__constructor__)) job_init(void)
{
    qemu_mutex_init(&job_mutex);
}

JobTxn *job_txn_new(void)
{
    JobTxn *txn = g_new0(JobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void job_txn_ref(JobTxn *txn)
{
    txn->refcnt++;
}

void job_txn_unref(JobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void job_txn_add_job(JobTxn *txn, Job *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    job_txn_ref(txn);
}

static void job_txn_del_job(Job *job)
{
    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        job_txn_unref(job->txn);
        job->txn = NULL;
    }
}

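/* Call @fn for each job in @job's transaction, stopping at the first non-zero
 * return value, which is also the return value of this function. The caller
 * must hold the AioContext lock of @job; it is dropped and re-acquired around
 * the iteration, and each other job's AioContext lock is taken while @fn runs
 * on it. */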
static int job_txn_apply(Job *job, int fn(Job *))
{
    AioContext *inner_ctx;
    Job *other_job, *next;
    JobTxn *txn = job->txn;
    int rc = 0;

    /*
     * Similar to job_completed_txn_abort, we take each job's lock before
     * applying fn, but since we assume that outer_ctx is held by the caller,
     * we need to release it here to avoid holding the lock twice - which would
     * break AIO_WAIT_WHILE from within fn.
     */
    job_ref(job);
    aio_context_release(job->aio_context);

    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        inner_ctx = other_job->aio_context;
        aio_context_acquire(inner_ctx);
        rc = fn(other_job);
        aio_context_release(inner_ctx);
        if (rc) {
            break;
        }
    }

    /*
     * Note that job->aio_context might have been changed by calling fn, so we
     * can't use a local variable to cache it.
     */
    aio_context_acquire(job->aio_context);
    job_unref(job);
    return rc;
}

bool job_is_internal(Job *job)
{
    return (job->id == NULL);
}

static void job_state_transition(Job *job, JobStatus s1)
{
    JobStatus s0 = job->status;
    assert(s1 >= 0 && s1 < JOB_STATUS__MAX);
    trace_job_state_transition(job, job->ret,
                               JobSTT[s0][s1] ? "allowed" : "disallowed",
                               JobStatus_str(s0), JobStatus_str(s1));
    assert(JobSTT[s0][s1]);
    job->status = s1;

    if (!job_is_internal(job) && s1 != s0) {
        qapi_event_send_job_status_change(job->id, job->status);
    }
}

int job_apply_verb(Job *job, JobVerb verb, Error **errp)
{
    JobStatus s0 = job->status;
    assert(verb >= 0 && verb < JOB_VERB__MAX);
    trace_job_apply_verb(job, JobStatus_str(s0), JobVerb_str(verb),
                         JobVerbTable[verb][s0] ? "allowed" : "prohibited");
    if (JobVerbTable[verb][s0]) {
        return 0;
    }
    error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
               job->id, JobStatus_str(s0), JobVerb_str(verb));
    return -EPERM;
}

JobType job_type(const Job *job)
{
    return job->driver->job_type;
}

const char *job_type_str(const Job *job)
{
    return JobType_str(job_type(job));
}

bool job_is_cancelled(Job *job)
{
    return job->cancelled;
}

bool job_is_ready(Job *job)
{
    switch (job->status) {
    case JOB_STATUS_UNDEFINED:
    case JOB_STATUS_CREATED:
    case JOB_STATUS_RUNNING:
    case JOB_STATUS_PAUSED:
    case JOB_STATUS_WAITING:
    case JOB_STATUS_PENDING:
    case JOB_STATUS_ABORTING:
    case JOB_STATUS_CONCLUDED:
    case JOB_STATUS_NULL:
        return false;
    case JOB_STATUS_READY:
    case JOB_STATUS_STANDBY:
        return true;
    default:
        g_assert_not_reached();
    }
    return false;
}

bool job_is_completed(Job *job)
{
    switch (job->status) {
    case JOB_STATUS_UNDEFINED:
    case JOB_STATUS_CREATED:
    case JOB_STATUS_RUNNING:
    case JOB_STATUS_PAUSED:
    case JOB_STATUS_READY:
    case JOB_STATUS_STANDBY:
        return false;
    case JOB_STATUS_WAITING:
    case JOB_STATUS_PENDING:
    case JOB_STATUS_ABORTING:
    case JOB_STATUS_CONCLUDED:
    case JOB_STATUS_NULL:
        return true;
    default:
        g_assert_not_reached();
    }
    return false;
}

static bool job_started(Job *job)
{
    return job->co;
}

static bool job_should_pause(Job *job)
{
    return job->pause_count > 0;
}

Job *job_next(Job *job)
{
    if (!job) {
        return QLIST_FIRST(&jobs);
    }
    return QLIST_NEXT(job, job_list);
}

Job *job_get(const char *id)
{
    Job *job;

    QLIST_FOREACH(job, &jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

static void job_sleep_timer_cb(void *opaque)
{
    Job *job = opaque;

    job_enter(job);
}

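/* Create a new job. @job_id must be well-formed and unique, and may only be
 * NULL for internal jobs (JOB_INTERNAL). The job starts out paused in the
 * CREATED state; if @txn is NULL, it is wrapped in a single-job transaction
 * of its own. */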
void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
                 AioContext *ctx, int flags, BlockCompletionFunc *cb,
                 void *opaque, Error **errp)
{
    Job *job;

    if (job_id) {
        if (flags & JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal job");
            return NULL;
        }
        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }
        if (job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    } else if (!(flags & JOB_INTERNAL)) {
        error_setg(errp, "An explicit job ID is required");
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->refcnt        = 1;
    job->aio_context   = ctx;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;
    job->auto_finalize = !(flags & JOB_MANUAL_FINALIZE);
    job->auto_dismiss  = !(flags & JOB_MANUAL_DISMISS);
    job->cb            = cb;
    job->opaque        = opaque;

    notifier_list_init(&job->on_finalize_cancelled);
    notifier_list_init(&job->on_finalize_completed);
    notifier_list_init(&job->on_pending);
    notifier_list_init(&job->on_ready);

    job_state_transition(job, JOB_STATUS_CREATED);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   job_sleep_timer_cb, job);

    QLIST_INSERT_HEAD(&jobs, job, job_list);

    /* Single jobs are modeled as single-job transactions for the sake of
     * consolidating the job management logic. */
    if (!txn) {
        txn = job_txn_new();
        job_txn_add_job(txn, job);
        job_txn_unref(txn);
    } else {
        job_txn_add_job(txn, job);
    }

    return job;
}
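
/*
 * A minimal usage sketch (illustrative only; example_job_driver, example_run,
 * work_remaining() and do_one_chunk() are hypothetical names, not part of this
 * file). A driver embeds Job at the start of its own state struct, provides a
 * .run coroutine, and then creates and starts the job:
 *
 *     static int coroutine_fn example_run(Job *job, Error **errp)
 *     {
 *         while (work_remaining()) {
 *             do_one_chunk();
 *             job_progress_update(job, 1);
 *             job_sleep_ns(job, 0);
 *             if (job_is_cancelled(job)) {
 *                 return 0;
 *             }
 *         }
 *         return 0;
 *     }
 *
 *     Job *job = job_create("example0", &example_job_driver, NULL,
 *                           qemu_get_aio_context(), JOB_DEFAULT,
 *                           NULL, NULL, errp);
 *     if (job) {
 *         job_start(job);
 *     }
 *
 * job_sleep_ns() both yields to the main loop and honours pause requests via
 * job_pause_point().
 */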

void job_ref(Job *job)
{
    ++job->refcnt;
}

void job_unref(Job *job)
{
    if (--job->refcnt == 0) {
        assert(job->status == JOB_STATUS_NULL);
        assert(!timer_pending(&job->sleep_timer));
        assert(!job->txn);

        if (job->driver->free) {
            job->driver->free(job);
        }

        QLIST_REMOVE(job, job_list);

        error_free(job->err);
        g_free(job->id);
        g_free(job);
    }
}

void job_progress_update(Job *job, uint64_t done)
{
    progress_work_done(&job->progress, done);
}

void job_progress_set_remaining(Job *job, uint64_t remaining)
{
    progress_set_remaining(&job->progress, remaining);
}

void job_progress_increase_remaining(Job *job, uint64_t delta)
{
    progress_increase_remaining(&job->progress, delta);
}

void job_event_cancelled(Job *job)
{
    notifier_list_notify(&job->on_finalize_cancelled, job);
}

void job_event_completed(Job *job)
{
    notifier_list_notify(&job->on_finalize_completed, job);
}

static void job_event_pending(Job *job)
{
    notifier_list_notify(&job->on_pending, job);
}

static void job_event_ready(Job *job)
{
    notifier_list_notify(&job->on_ready, job);
}

static void job_event_idle(Job *job)
{
    notifier_list_notify(&job->on_idle, job);
}

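/* Enter the job coroutine if it has started, is not deferred to the main
 * loop, is not currently busy, and @fn (if given) returns true. Clears any
 * pending sleep timer and marks the job busy before entering. */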
void job_enter_cond(Job *job, bool(*fn)(Job *job))
{
    if (!job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    job_lock();
    if (job->busy) {
        job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    job_unlock();
    aio_co_enter(job->aio_context, job->co);
}

void job_enter(Job *job)
{
    job_enter_cond(job, NULL);
}

/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with job_enter() before the timer has expired
 * is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be
 * called explicitly. */
static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
{
    job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    job_event_idle(job);
    job_unlock();
    qemu_coroutine_yield();

    /* Set by job_enter_cond() before re-entering the coroutine.  */
    assert(job->busy);
}

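/* Called from the job coroutine at points where it is safe to pause. If a
 * pause has been requested and the job is not cancelled, this transitions to
 * PAUSED (or STANDBY when READY), yields until resumed, and then restores the
 * previous status. The driver's pause and resume callbacks are invoked around
 * the (potential) pause. */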
void coroutine_fn job_pause_point(Job *job)
{
    assert(job && job_started(job));

    if (!job_should_pause(job)) {
        return;
    }
    if (job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (job_should_pause(job) && !job_is_cancelled(job)) {
        JobStatus status = job->status;
        job_state_transition(job, status == JOB_STATUS_READY
                                  ? JOB_STATUS_STANDBY
                                  : JOB_STATUS_PAUSED);
        job->paused = true;
        job_do_yield(job, -1);
        job->paused = false;
        job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void job_yield(Job *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (job_is_cancelled(job)) {
        return;
    }

    if (!job_should_pause(job)) {
        job_do_yield(job, -1);
    }

    job_pause_point(job);
}

void coroutine_fn job_sleep_ns(Job *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (job_is_cancelled(job)) {
        return;
    }

    if (!job_should_pause(job)) {
        job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    job_pause_point(job);
}

/* Assumes the job_mutex is held */
static bool job_timer_not_pending(Job *job)
{
    return !timer_pending(&job->sleep_timer);
}

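/* job_pause() and job_resume() nest: the job only actually runs again once
 * every job_pause() has been matched by a job_resume(), and job_resume()
 * only kicks the coroutine when no sleep timer is pending. */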
void job_pause(Job *job)
{
    job->pause_count++;
}

void job_resume(Job *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }

    /* kick only if no timer is pending */
    job_enter_cond(job, job_timer_not_pending);
}

void job_user_pause(Job *job, Error **errp)
{
    if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) {
        return;
    }
    if (job->user_paused) {
        error_setg(errp, "Job is already paused");
        return;
    }
    job->user_paused = true;
    job_pause(job);
}

bool job_user_paused(Job *job)
{
    return job->user_paused;
}

void job_user_resume(Job *job, Error **errp)
{
    assert(job);
    if (!job->user_paused || job->pause_count <= 0) {
        error_setg(errp, "Can't resume a job that was not paused");
        return;
    }
    if (job_apply_verb(job, JOB_VERB_RESUME, errp)) {
        return;
    }
    if (job->driver->user_resume) {
        job->driver->user_resume(job);
    }
    job->user_paused = false;
    job_resume(job);
}

static void job_do_dismiss(Job *job)
{
    assert(job);
    job->busy = false;
    job->paused = false;
    job->deferred_to_main_loop = true;

    job_txn_del_job(job);

    job_state_transition(job, JOB_STATUS_NULL);
    job_unref(job);
}

void job_dismiss(Job **jobptr, Error **errp)
{
    Job *job = *jobptr;
    /* Similarly to job_complete(), this is QMP-interface only. */
    assert(job->id);
    if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) {
        return;
    }

    job_do_dismiss(job);
    *jobptr = NULL;
}

void job_early_fail(Job *job)
{
    assert(job->status == JOB_STATUS_CREATED);
    job_do_dismiss(job);
}

static void job_conclude(Job *job)
{
    job_state_transition(job, JOB_STATUS_CONCLUDED);
    if (job->auto_dismiss || !job_started(job)) {
        job_do_dismiss(job);
    }
}

static void job_update_rc(Job *job)
{
    if (!job->ret && job_is_cancelled(job)) {
        job->ret = -ECANCELED;
    }
    if (job->ret) {
        if (!job->err) {
            error_setg(&job->err, "%s", strerror(-job->ret));
        }
        job_state_transition(job, JOB_STATUS_ABORTING);
    }
}

static void job_commit(Job *job)
{
    assert(!job->ret);
    if (job->driver->commit) {
        job->driver->commit(job);
    }
}

static void job_abort(Job *job)
{
    assert(job->ret);
    if (job->driver->abort) {
        job->driver->abort(job);
    }
}

static void job_clean(Job *job)
{
    if (job->driver->clean) {
        job->driver->clean(job);
    }
}

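/* Finalize a single completed job: commit on success or abort on failure,
 * then clean up, invoke the completion callback, emit the cancelled/completed
 * event if the job ever ran, remove it from its transaction and conclude it. */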
static int job_finalize_single(Job *job)
{
    assert(job_is_completed(job));

    /* Ensure abort is called for late-transactional failures */
    job_update_rc(job);

    if (!job->ret) {
        job_commit(job);
    } else {
        job_abort(job);
    }
    job_clean(job);

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (job_started(job)) {
        if (job_is_cancelled(job)) {
            job_event_cancelled(job);
        } else {
            job_event_completed(job);
        }
    }

    job_txn_del_job(job);
    job_conclude(job);
    return 0;
}

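/* Request cancellation without entering the job: drops a user pause if one is
 * active (without kicking the coroutine), sets job->cancelled, and records a
 * forced cancel so that a later non-forced request cannot downgrade it. */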
static void job_cancel_async(Job *job, bool force)
{
    if (job->user_paused) {
        /* Do not call job_enter here, the caller will handle it.  */
        if (job->driver->user_resume) {
            job->driver->user_resume(job);
        }
        job->user_paused = false;
        assert(job->pause_count > 0);
        job->pause_count--;
    }
    job->cancelled = true;
    /* To prevent 'force == false' overriding a previous 'force == true' */
    job->force_cancel |= force;
}

static void job_completed_txn_abort(Job *job)
{
    AioContext *outer_ctx = job->aio_context;
    AioContext *ctx;
    JobTxn *txn = job->txn;
    Job *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    job_txn_ref(txn);

    /* We can only hold the single job's AioContext lock while calling
     * job_finalize_single() because the finalization callbacks can involve
     * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. */
    aio_context_release(outer_ctx);

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            ctx = other_job->aio_context;
            aio_context_acquire(ctx);
            job_cancel_async(other_job, false);
            aio_context_release(ctx);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = other_job->aio_context;
        aio_context_acquire(ctx);
        if (!job_is_completed(other_job)) {
            assert(job_is_cancelled(other_job));
            job_finish_sync(other_job, NULL, NULL);
        }
        job_finalize_single(other_job);
        aio_context_release(ctx);
    }

    aio_context_acquire(outer_ctx);

    job_txn_unref(txn);
}

static int job_prepare(Job *job)
{
    if (job->ret == 0 && job->driver->prepare) {
        job->ret = job->driver->prepare(job);
        job_update_rc(job);
    }
    return job->ret;
}

static int job_needs_finalize(Job *job)
{
    return !job->auto_finalize;
}

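/* Finalize @job's entire transaction: run every job's prepare callback first;
 * if any of them fails, abort the whole transaction, otherwise finalize each
 * job in it. */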
static void job_do_finalize(Job *job)
{
    int rc;
    assert(job && job->txn);

    /* prepare the transaction to complete */
    rc = job_txn_apply(job, job_prepare);
    if (rc) {
        job_completed_txn_abort(job);
    } else {
        job_txn_apply(job, job_finalize_single);
    }
}

void job_finalize(Job *job, Error **errp)
{
    assert(job && job->id);
    if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) {
        return;
    }
    job_do_finalize(job);
}

static int job_transition_to_pending(Job *job)
{
    job_state_transition(job, JOB_STATUS_PENDING);
    if (!job->auto_finalize) {
        job_event_pending(job);
    }
    return 0;
}

void job_transition_to_ready(Job *job)
{
    job_state_transition(job, JOB_STATUS_READY);
    job_event_ready(job);
}

static void job_completed_txn_success(Job *job)
{
    JobTxn *txn = job->txn;
    Job *other_job;

    job_state_transition(job, JOB_STATUS_WAITING);

    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!job_is_completed(other_job)) {
            return;
        }
        assert(other_job->ret == 0);
    }

    job_txn_apply(job, job_transition_to_pending);

    /* If no jobs need manual finalization, automatically do so */
    if (job_txn_apply(job, job_needs_finalize) == 0) {
        job_do_finalize(job);
    }
}

static void job_completed(Job *job)
{
    assert(job && job->txn && !job_is_completed(job));

    job_update_rc(job);
    trace_job_completed(job, job->ret);
    if (job->ret) {
        job_completed_txn_abort(job);
    } else {
        job_completed_txn_success(job);
    }
}

/** Useful only as a type shim for aio_bh_schedule_oneshot. */
static void job_exit(void *opaque)
{
    Job *job = (Job *)opaque;
    AioContext *ctx;

    job_ref(job);
    aio_context_acquire(job->aio_context);

    /* This is a lie, we're not quiescent, but still doing the completion
     * callbacks. However, completion callbacks tend to involve operations that
     * drain block nodes, and if .drained_poll still returned true, we would
     * deadlock. */
    job->busy = false;
    job_event_idle(job);

    job_completed(job);

    /*
     * Note that calling job_completed can move the job to a different
     * aio_context, so we cannot cache from above. job_txn_apply takes care of
     * acquiring the new lock, and we ref/unref to avoid job_completed freeing
     * the job underneath us.
     */
    ctx = job->aio_context;
    job_unref(job);
    aio_context_release(ctx);
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn job_co_entry(void *opaque)
{
    Job *job = opaque;

    assert(job && job->driver && job->driver->run);
    job_pause_point(job);
    job->ret = job->driver->run(job, &job->err);
    job->deferred_to_main_loop = true;
    job->busy = true;
    aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
}

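/* Start a freshly created job: create its coroutine, drop the pause taken at
 * creation time, transition to RUNNING and enter the coroutine in the job's
 * AioContext. */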
void job_start(Job *job)
{
    assert(job && !job_started(job) && job->paused &&
           job->driver && job->driver->run);
    job->co = qemu_coroutine_create(job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    job_state_transition(job, JOB_STATUS_RUNNING);
    aio_co_enter(job->aio_context, job->co);
}

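/* Cancel @job. A CONCLUDED job is simply dismissed; a job that never started
 * is completed (and thus aborted) immediately; a job deferred to the main
 * loop has its transaction aborted; otherwise the coroutine is entered so it
 * can notice the cancellation itself. */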
void job_cancel(Job *job, bool force)
{
    if (job->status == JOB_STATUS_CONCLUDED) {
        job_do_dismiss(job);
        return;
    }
    job_cancel_async(job, force);
    if (!job_started(job)) {
        job_completed(job);
    } else if (job->deferred_to_main_loop) {
        job_completed_txn_abort(job);
    } else {
        job_enter(job);
    }
}

void job_user_cancel(Job *job, bool force, Error **errp)
{
    if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) {
        return;
    }
    job_cancel(job, force);
}

/* A wrapper around job_cancel() taking an Error ** parameter so it may be
 * used with job_finish_sync() without the need for (rather nasty) function
 * pointer casts there. */
static void job_cancel_err(Job *job, Error **errp)
{
    job_cancel(job, false);
}

int job_cancel_sync(Job *job)
{
    return job_finish_sync(job, &job_cancel_err, NULL);
}

void job_cancel_sync_all(void)
{
    Job *job;
    AioContext *aio_context;

    while ((job = job_next(NULL))) {
        aio_context = job->aio_context;
        aio_context_acquire(aio_context);
        job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int job_complete_sync(Job *job, Error **errp)
{
    return job_finish_sync(job, job_complete, errp);
}

void job_complete(Job *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) {
        return;
    }
    if (job->pause_count || job_is_cancelled(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

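/* Run @finish (if given) on @job, then poll until the job has completed.
 * Returns -EBUSY if @finish set an error, -ECANCELED if the job was cancelled
 * and reported no other error, and job->ret otherwise. The caller is expected
 * to hold the job's AioContext lock, as required by AIO_WAIT_WHILE(). */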
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
{
    Error *local_err = NULL;
    int ret;

    job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        job_unref(job);
        return -EBUSY;
    }

    AIO_WAIT_WHILE(job->aio_context,
                   (job_enter(job), !job_is_completed(job)));

    ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
    job_unref(job);
    return ret;
}