/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */

static bool is_block_job(Job *job)
{
    return job_type(job) == JOB_TYPE_BACKUP ||
           job_type(job) == JOB_TYPE_COMMIT ||
           job_type(job) == JOB_TYPE_MIRROR ||
           job_type(job) == JOB_TYPE_STREAM;
}

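/*
 * Iterate over all block jobs, in job_next() order.  Pass NULL to get the
 * first job; returns NULL once past the last one.  A typical loop (sketch):
 *
 *     for (BlockJob *bj = block_job_next(NULL); bj; bj = block_job_next(bj)) {
 *         ... inspect bj ...
 *     }
 */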
BlockJob *block_job_next(BlockJob *bjob)
{
    Job *job = bjob ? &bjob->job : NULL;

    do {
        job = job_next(job);
    } while (job && !is_block_job(job));

    return job ? container_of(job, BlockJob, job) : NULL;
}

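/* Look up a job by ID and return it only if it is in fact a block job. */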
BlockJob *block_job_get(const char *id)
{
    Job *job = job_get(id);

    if (job && is_block_job(job)) {
        return container_of(job, BlockJob, job);
    } else {
        return NULL;
    }
}

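/*
 * The Job.free callback for block jobs: drops the references the job holds
 * on its graph nodes and on its BlockBackend.
 */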
void block_job_free(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);

    block_job_remove_all_bdrv(bjob);
    blk_unref(bjob->blk);
    error_free(bjob->blocker);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'", job_type_str(&job->job), job->job.id);
}

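/* While a parent node is drained, keep the job paused so it issues no I/O. */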
static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_pause(&job->job);
}

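/*
 * Returns whether the job may still have requests in flight for this child,
 * i.e. whether a drained section still needs to keep polling.
 */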
static bool child_job_drained_poll(BdrvChild *c)
{
    BlockJob *bjob = c->opaque;
    Job *job = &bjob->job;
    const BlockJobDriver *drv = block_job_driver(bjob);

    /* An inactive or completed job doesn't have any pending requests. Jobs
     * with !job->busy are either already paused or have a pause point after
     * being reentered, so no job driver code will run before they pause. */
    if (!job->busy || job_is_completed(job)) {
        return false;
    }

    /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
     * override this assumption. */
    if (drv->drained_poll) {
        return drv->drained_poll(bjob);
    } else {
        return true;
    }
}

static void child_job_drained_end(BdrvChild *c, int *drained_end_counter)
{
    BlockJob *job = c->opaque;
    job_resume(&job->job);
}

static bool child_job_can_set_aio_ctx(BdrvChild *c, AioContext *ctx,
                                      GSList **ignore, Error **errp)
{
    BlockJob *job = c->opaque;
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *sibling = l->data;
        if (!bdrv_child_can_set_aio_context(sibling, ctx, ignore, errp)) {
            return false;
        }
    }
    return true;
}

static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx,
                                  GSList **ignore)
{
    BlockJob *job = c->opaque;
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *sibling = l->data;
        if (g_slist_find(*ignore, sibling)) {
            continue;
        }
        *ignore = g_slist_prepend(*ignore, sibling);
        bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore);
    }

    job->job.aio_context = ctx;
}

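/*
 * The BdrvChildClass through which a block job is attached as a parent to
 * the nodes it operates on.  stay_at_node keeps the job's reference on its
 * node when a graph change moves the node's other parents elsewhere.
 */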
static const BdrvChildClass child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .drained_begin = child_job_drained_begin,
    .drained_poll = child_job_drained_poll,
    .drained_end = child_job_drained_end,
    .can_set_aio_ctx = child_job_can_set_aio_ctx,
    .set_aio_ctx = child_job_set_aio_ctx,
    .stay_at_node = true,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    /*
     * bdrv_root_unref_child() may reach child_job_[can_]set_aio_ctx(),
     * which will also traverse job->nodes, so consume the list one by
     * one to make sure that such a concurrent access does not attempt
     * to process an already freed BdrvChild.
     */
    while (job->nodes) {
        GSList *l = job->nodes;
        BdrvChild *c = l->data;

        job->nodes = l->next;

        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);

        g_slist_free_1(l);
    }
}

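/* Returns whether @bs is one of the nodes the job is attached to. */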
bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
{
    GSList *el;

    for (el = job->nodes; el; el = el->next) {
        BdrvChild *c = el->data;
        if (c->bs == bs) {
            return true;
        }
    }

    return false;
}

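/*
 * Attach @bs to the job as a child named @name, taking the requested
 * permissions.  If @bs lives in a different AioContext than the job, the
 * job's context lock is dropped around bdrv_root_attach_child() so that
 * the node can be moved into the job's context without deadlocking.
 */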
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;
    bool need_context_ops;

    bdrv_ref(bs);

    need_context_ops = bdrv_get_aio_context(bs) != job->job.aio_context;

    if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
        aio_context_release(job->job.aio_context);
    }
    c = bdrv_root_attach_child(bs, name, &child_job, 0,
                               job->job.aio_context, perm, shared_perm, job,
                               errp);
    if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
        aio_context_acquire(job->job.aio_context);
    }
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

static void block_job_on_idle(Notifier *n, void *opaque)
{
    aio_wait_kick();
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->job.id == NULL);
}

const BlockJobDriver *block_job_driver(BlockJob *job)
{
    return container_of(job->job.driver, BlockJobDriver, job_driver);
}

static bool job_timer_pending(Job *job)
{
    return timer_pending(&job->sleep_timer);
}

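/*
 * Set the job's rate limit to @speed bytes per second.  Returns true on
 * success; fails if set-speed is not currently permitted for the job or if
 * @speed is negative.
 */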
bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    const BlockJobDriver *drv = block_job_driver(job);
    int64_t old_speed = job->speed;

    if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
        return false;
    }
    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "speed",
                   "a non-negative value");
        return false;
    }

    ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);

    job->speed = speed;

    if (drv->set_speed) {
        drv->set_speed(job, speed);
    }

    if (speed && speed <= old_speed) {
        return true;
    }

    /* kick only if a timer is pending */
    job_enter_cond(&job->job, job_timer_pending);

    return true;
}

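/*
 * Account @n bytes against the job's rate limit and return how long the job
 * should sleep to honour it; 0 when the job is unthrottled.
 */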
int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
{
    if (!job->speed) {
        return 0;
    }

    return ratelimit_calculate_delay(&job->limit, n);
}

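/* Build the BlockJobInfo reported for @job; internal jobs are not visible. */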
BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type = g_strdup(job_type_str(&job->job));
    info->device = g_strdup(job->job.id);
    info->busy = qatomic_read(&job->job.busy);
    info->paused = job->job.pause_count > 0;
    info->offset = job->job.progress.current;
    info->len = job->job.progress.total;
    info->speed = job->speed;
    info->io_status = job->iostatus;
    info->ready = job_is_ready(&job->job);
    info->status = job->job.status;
    info->auto_finalize = job->job.auto_finalize;
    info->auto_dismiss = job->job.auto_dismiss;
    if (job->job.ret) {
        info->has_error = true;
        info->error = job->job.err ?
                      g_strdup(error_get_pretty(job->job.err)) :
                      g_strdup(strerror(-job->job.ret));
    }
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job_type(&job->job),
                                        job->job.id,
                                        job->job.progress.total,
                                        job->job.progress.current,
                                        job->speed);
}

static void block_job_event_completed(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    const char *msg = NULL;

    if (block_job_is_internal(job)) {
        return;
    }

    if (job->job.ret < 0) {
        msg = error_get_pretty(job->job.err);
    }

    qapi_event_send_block_job_completed(job_type(&job->job),
                                        job->job.id,
                                        job->job.progress.total,
                                        job->job.progress.current,
                                        job->speed,
                                        !!msg,
                                        msg);
}

static void block_job_event_pending(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_pending(job_type(&job->job),
                                      job->job.id);
}

static void block_job_event_ready(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job_type(&job->job),
                                    job->job.id,
                                    job->job.progress.total,
                                    job->job.progress.current,
                                    job->speed);
}

/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */

void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;

    if (job_id == NULL && !(flags & JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
    }

    blk = blk_new_with_bs(bs, perm, shared_perm, errp);
    if (!blk) {
        return NULL;
    }

    job = job_create(job_id, &driver->job_driver, txn, blk_get_aio_context(blk),
                     flags, cb, opaque, errp);
    if (job == NULL) {
        blk_unref(blk);
        return NULL;
    }

    assert(is_block_job(&job->job));
    assert(job->job.driver->free == &block_job_free);
    assert(job->job.driver->user_resume == &block_job_user_resume);

    job->blk = blk;

    job->finalize_cancelled_notifier.notify = block_job_event_cancelled;
    job->finalize_completed_notifier.notify = block_job_event_completed;
    job->pending_notifier.notify = block_job_event_pending;
    job->ready_notifier.notify = block_job_event_ready;
    job->idle_notifier.notify = block_job_on_idle;

    notifier_list_add(&job->job.on_finalize_cancelled,
                      &job->finalize_cancelled_notifier);
    notifier_list_add(&job->job.on_finalize_completed,
                      &job->finalize_completed_notifier);
    notifier_list_add(&job->job.on_pending, &job->pending_notifier);
    notifier_list_add(&job->job.on_ready, &job->ready_notifier);
    notifier_list_add(&job->job.on_idle, &job->idle_notifier);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               job_type_str(&job->job));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    /* Disable request queuing in the BlockBackend to avoid deadlocks on drain:
     * The job reports that it's busy until it reaches a pause point. */
    blk_set_disable_request_queuing(blk, true);
    blk_set_allow_aio_context_change(blk, true);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        if (!block_job_set_speed(job, speed, errp)) {
            job_early_fail(&job->job);
            return NULL;
        }
    }

    return job;
}

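/*
 * Reset the job's I/O status to OK.  Only legal while the job is
 * user-paused after an error; the caller still has to resume the job.
 */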
void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->job.user_paused && job->job.pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_user_resume(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    block_job_iostatus_reset(bjob);
}

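/*
 * Map an I/O error to the action requested by the @on_err policy, emit the
 * BLOCK_JOB_ERROR QMP event for user-visible jobs, and pause the job when
 * the action is "stop".
 */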
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->job.id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        if (!job->job.user_paused) {
            job_pause(&job->job);
            /* make the pause user visible, which will be resumed from QMP. */
            job->job.user_paused = true;
        }
        block_job_iostatus_set_err(job, error);
    }
    return action;
}