/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
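
/* With the defaults above, the buffer covers 16 concurrent requests of
 * 1 MiB each, i.e. 16 MiB split into granularity-sized chunks. */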

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive while this block job copies it */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
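
/* Wait until no in-flight operation overlaps [offset, offset + bytes) in
 * granularity chunks.  @self may be NULL (e.g. for the pseudo operation in
 * mirror_iteration()); when set, the operation we wait on is recorded in
 * self->waiting_for_op so that two operations waiting on each other cannot
 * deadlock. */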
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }

    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already downscaled by the above
     * "align_bytes = max_bytes". */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /*
         * Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function.  Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on.
         */
        if (!op->is_pseudo_op && op->is_in_flight &&
            op->is_active_write == active)
        {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}
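
/* Launch a single copy, zero or discard operation in its own coroutine and
 * return how many bytes it claimed at @offset (reported back through
 * op->bytes_handled before the coroutine first yields). */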
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* As in mirror_co_read(), the value reported back cannot exceed
     * UINT_MAX */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}
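
/* Submit one background iteration: pick the next dirty area, extend it over
 * consecutive dirty chunks (bounded by s->buf_size), reset the dirty bits
 * and issue copy/zero/discard operations for the area.  Returns the
 * rate-limiting delay in nanoseconds for the caller to sleep. */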
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Wait for concurrent requests to @offset before claiming more chunks */
    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, register a pseudo
     * operation so that conflicting requests have something to wait on.
     * mirror_perform() will create the real operations below; the pseudo
     * operation wakes up all waiters once every real operation has been
     * launched.
     */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}
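
/* Carve s->buf into granularity-sized chunks and thread them onto the
 * buf_free list. */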
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         */
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}
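
/* Populate the dirty bitmap before copying starts: zero the target first if
 * requested (or, if efficient zero writes are unavailable, simply mark the
 * whole device dirty), then mark every area that is allocated above the
 * base as dirty. */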
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
                                      &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_truncate(s->target, s->bdev_length, false,
                               PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->actively_synced = true;
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If the target is supposed to have a backing file that is not yet
     * attached, writes smaller than the target cluster size would trigger
     * copy-on-write of whole clusters; track copied clusters in cow_bitmap
     * so such writes can be aligned to target cluster boundaries. */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            need_drain = false;
            break;
        }

        if (job_is_ready(&s->common.job) && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                  delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}
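
/* Handler for the block-job-complete QMP command: acquire the node to
 * replace, if one was requested, and flag the job so that mirror_run()
 * pivots to the target. */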
static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    if (!job->paused) {
        job_enter(job);
    }
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    if (!s->common.job.paused && !job_is_cancelled(&job->job) && !s->in_drain) {
        return true;
    }

    return !!s->in_flight;
}

static bool mirror_cancel(Job *job, bool force)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

    if (force) {
        bdrv_cancel_in_flight(target);
    }
    return force;
}

static bool commit_active_cancel(Job *job, bool force)
{
    /* Same as above in mirror_cancel() */
    return force || !job_is_ready(job);
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = mirror_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = commit_active_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};
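
/* Mirror a guest write (or zero/discard) to the target synchronously, for
 * write-blocking (active) mode.  Unaligned dirty head and tail bytes are
 * skipped: they stay dirty and remain the background job's responsibility. */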
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset corresponding bit in
         *    dirty_bitmap as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it we don't diverge mirror
         *    progress.
         *
         * Note that because of this, a guest write may have no contribution
         * to mirror progress. But that's not a problem: the aligned part of
         * the guest write can be considered as already having participated
         * in the mirroring done by this call.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark dirty the whole area, aligned up.
         * Note that we don't care about shrunk tails if any: they were dirty
         * at function start, and they must be still dirty, as we've locked
         * the region for in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}
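
/* Register an active write: record it in ops_in_flight and the in-flight
 * bitmap so that background operations on the same area wait for it. */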
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
        .is_in_flight       = true,
        .co                 = qemu_coroutine_self(),
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     !job_is_cancelled(&s->job->common.job) &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     !job_is_cancelled(&s->job->common.job) &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int64_t bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    if (s->is_commit) {
        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
    }
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,

    .is_filter                  = true,
};

static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_is_backing;
    uint64_t target_perms, target_shared_perms;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    target_is_backing = bdrv_chain_contains(bs, target);

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    bs_opaque->is_commit = target_is_backing;

    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

    if (ret < 0) {
        bdrv_unref(mirror_top_bs);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");
            goto fail;
        }

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");
            goto fail;
        }

        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;
        }

        target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
        /*
         * We may want to allow this in the future, but it would
         * require taking some extra care.
         */
        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
                   "source's backing chain");
        goto fail;
    }

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* The target could be e.g. an NBD server on the destination VM of a
         * migration with non-shared storage; allow it to be inactivated when
         * migration completes. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }
    if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bdrv_disable_dirty_bitmap(s->dirty_bitmap);
    }

    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other option would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
        {
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
            }

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
            if (ret < 0) {
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}
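
/* Start a mirror job on @bs towards @target: entry point for the
 * drive-mirror and blockdev-mirror QMP commands.  Sync modes 'incremental'
 * and 'bitmap' are rejected here; errors are returned through @errp. */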
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    GLOBAL_STATE_CODE();

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}
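
/* Start an active commit job: mirror @bs into its own backing file @base,
 * temporarily making @base writable if it is read-only. */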
BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    BlockJob *job;

    GLOBAL_STATE_CODE();

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    job = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     errp);
    if (!job) {
        goto error_restore_flags;
    }

    return job;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}