/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20)
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

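/*
 * The mirroring buffer is split into granularity-sized chunks; free chunks
 * are kept on the buf_free list below.
 */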
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;

    /* Name of the graph node that the target should replace on completion */
    char *replaces;
    /* The node to be replaced; looked up from 'replaces' in mirror_complete() */
    BlockDriverState *to_replace;
    /* Blocks operations on to_replace while the job owns it */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;

    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    /* Set when the target is in sync and the job mirrors guest writes
     * actively (write-blocking copy mode) */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* Set by the operation coroutine before it yields for the first time;
     * mirror_perform() reads it back through this pointer. */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    CoQueue waiting_requests;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

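/*
 * Wait until no in-flight operation overlaps the given byte range (rounded
 * to granularity chunks).  @self may be NULL when there is no operation of
 * our own to exclude from the check.
 */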
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                qemu_co_queue_wait(&op->waiting_requests, NULL);
                break;
            }
        }
    }
}

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset so the request does not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

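/*
 * Round @offset and @bytes to target cluster boundaries when copy-on-write
 * is needed, and return by how many bytes the end of the range was extended
 * past the original end.
 */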
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }

    /* Clip the aligned range so it does not run past the end of the device */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /*
         * Skip the pseudo op inserted by mirror_iteration(): it performs no
         * I/O of its own, so waiting on it could stall until the whole
         * iteration has finished submitting requests.
         */
        if (!op->is_pseudo_op && op->is_active_write == active) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active (background) operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

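/*
 * Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including op->offset; this equals op->bytes unless the tail had to be
 * rounded for copy-on-write alignment or clipped to the buffer limit.
 */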
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }

    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);

    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));

    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Build a QEMUIOVector out of granularity-sized chunks from buf_free */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

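/*
 * Submit a copy, zero or discard operation for [offset, offset + bytes) in a
 * new coroutine and return the number of bytes the operation will handle
 * (which may differ from @bytes because of alignment, see mirror_co_read()).
 */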
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .bytes_handled = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /*
     * At this point ownership of op has passed to the coroutine and the
     * object may already have been freed; only bytes_handled (which lives
     * on our stack) may be used.
     */
    assert(bytes_handled >= 0);

    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

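/*
 * One background iteration: pick the next dirty area, extend it to as many
 * consecutive dirty chunks as fit in the buffer, and submit copy/zero/discard
 * operations for it.  Returns the delay (in ns) requested by the rate limiter.
 */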
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;

    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find how many chunks after the first one are dirty, not already in
     * flight, and still fit into the buffer. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* Re-position the iterator if it has moved past next_offset */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /*
     * Clear the dirty bits before querying the block status, because
     * bdrv_block_status_above() may yield; if blocks become dirty again in
     * that window, we need to know about it.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /*
     * Protect the whole area with a pseudo operation so that concurrent
     * active writes wait for us while we submit the real operations below.
     */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset = offset,
        .bytes = nb_chunks * s->granularity,
        .is_pseudo_op = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                MIRROR_METHOD_ZERO :
                                MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

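/* Wait for all background operations currently in flight to complete. */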
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

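/*
 * mirror_exit_common: shared by the .prepare and .abort job callbacks.
 * For .prepare it returns 0 on success and -errno on failure; for .abort
 * (job->ret < 0) it must return 0.
 */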
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Keep the nodes alive across the graph changes below */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Drop the target BlockBackend (and its WRITE/RESIZE permissions) before
     * target_bs is inserted at s->to_replace, where those permissions might
     * not be obtainable.
     */
    blk_unref(s->target);
    s->target = NULL;

    /*
     * The source is not accessed any more; drop the filter's permissions on
     * it and keep mirror_top_bs drained so that no new requests can arrive.
     */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The job has no requests in flight any more, but other users of the
         * target may; drain them before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph; drop the job's node
     * references first so that the resulting graph state is valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* The node the job BlockBackend points at was just replaced; switch the
     * BB back to mirror_top_bs (with no permissions) so cleanup works. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

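/*
 * Prime the dirty bitmap for a full or top sync: if explicit zeroing of the
 * target is requested but the target cannot zero efficiently, simply mark
 * the whole device dirty; otherwise zero the target up front and then mark
 * only the areas allocated above s->base as dirty.
 */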
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* Mark every area allocated above the base as dirty */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Keep the request within int range and granularity-aligned */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, false, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Flush the target; called right before transitioning to ready and again
 * before completing. */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

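/* Main loop of the mirror job (the job driver's .run callback). */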
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* only checked for being empty or not */

    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, false,
                               PREALLOC_MODE_OFF, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        s->actively_synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /*
     * If the target has a backing file that is not currently opened, the
     * target cannot do copy-on-write itself; when the target cluster size is
     * larger than the granularity, copy whole clusters ourselves and track
     * them in cow_bitmap.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start background operations while active writes are in
         * progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and bytes_in_flight the
         * number of bytes currently being processed; together they make up
         * the remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Even without a rate limit, yield periodically with no pending I/O
         * so that bdrv_drain_all() can return; this happens every
         * BLOCK_JOB_SLICE_TIME, on error, or once the source is clean. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret */
                    continue;
                }
                /* We are out of the streaming phase.  From now on, if the
                 * job is cancelled we still complete all pending I/O, so
                 * that block-job-cancel leaves the target consistent. */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /*
             * The guest may keep submitting I/O; drain the source before
             * exiting so that the dirty bitmap cannot change under our feet,
             * and re-check the dirty count afterwards.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync; exit and report successful
             * completion */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We only get here if something went wrong: either the job failed or
         * it was cancelled prematurely, so the target may not be a copy of
         * the source. */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* Block all operations on the to_replace node */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* Prevent any operations on to_replace until the job completes */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /*
     * If the job is neither paused nor cancelled, we cannot be sure that it
     * won't issue more requests.  We make an exception when we have reached
     * this point from one of our own drain sections, to avoid a deadlock
     * waiting for ourselves.
     */
    if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) {
        return true;
    }

    return !!s->in_flight;
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size = sizeof(MirrorBlockJob),
        .job_type = JOB_TYPE_MIRROR,
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .run = mirror_run,
        .prepare = mirror_prepare,
        .abort = mirror_abort,
        .pause = mirror_pause,
        .complete = mirror_complete,
    },
    .drained_poll = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size = sizeof(MirrorBlockJob),
        .job_type = JOB_TYPE_COMMIT,
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .run = mirror_run,
        .prepare = mirror_prepare,
        .abort = mirror_abort,
        .pause = mirror_pause,
        .complete = mirror_complete,
    },
    .drained_poll = mirror_drained_poll,
};

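/*
 * Write-blocking (active) mirror: replicate a guest write/zero/discard
 * directly to the target.  Unaligned head and tail bytes that are still
 * dirty are skipped here and left to the background copy.
 */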
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * The head of the request is not aligned to the job granularity and
         * the first chunk it touches is still dirty.  Leave those head bytes
         * to the background copy (they stay dirty) and start at the next
         * granularity boundary.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing left to write after shrinking */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing left to write after shrinking */
            return;
        }
        bytes -= tail;
    }

    /*
     * Head and tail are now either aligned or were shrunk away, so the
     * bitmap range can safely be aligned inwards before resetting it.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * The write failed, so mark the whole area dirty again, aligned
         * outwards; any shrunk head or tail was already dirty before.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}

static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .is_active_write = true,
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Once all active writes have settled we must be back in sync;
             * this can only be asserted when the mirror node is the source
             * node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

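/*
 * Forward a write/zero/discard from the mirror_top filter node to the source;
 * in write-blocking mode, mirror it to the target as well.
 */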
static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might concurrently modify the data in the original
         * buffer, so copy it into a bounce buffer before writing it to both
         * source and target. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* No backing child (e.g. during setup or teardown): nothing to flush */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* No backing child yet: keep the default filename */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * The job is being stopped and this node will be dropped from the
         * graph, so request no permissions and do not unshare any.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    /* Request WRITE on the backing node only if our parents need it */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

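/*
 * Filter driver inserted above the source node for the duration of the job.
 * It forwards all I/O to its backing file and gives the active-mirror code a
 * hook into guest writes.
 */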
static BlockDriver bdrv_mirror_top = {
    .format_name = "mirror_top",
    .bdrv_co_preadv = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush = bdrv_mirror_top_flush,
    .bdrv_co_block_status = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm = bdrv_mirror_top_child_perm,
};

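/*
 * Common setup for mirror and active commit: insert the mirror_top filter,
 * create the block job, the target BlockBackend and the dirty bitmap, and
 * start the job.
 */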
static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bs == target) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    /* Insert a filter driver above the source node; it keeps guest reads
     * consistent and gives this job a hook into guest writes. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    /* bdrv_append() takes ownership of the mirror_top_bs reference; keep our
     * own reference until block_job_create() has succeeded. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /*
     * The job writes and resizes the target.  When the target is part of the
     * source's backing chain (active commit), other users must still be able
     * to read from and write to it, so share those permissions.
     */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(s->common.job.aio_context,
                        BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* Allow the target to be inactivated (e.g. at migration completion)
         * even while this BlockBackend still references it. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }
    if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bdrv_disable_dirty_bitmap(s->dirty_bitmap);
    }

    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* The required permissions on the target are already held via s->target */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* When the target is in the backing chain (active commit), attach the
     * intermediate nodes to the job too and freeze the chain. */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Keep mirror_top_bs alive until the graph changes below are done */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}

BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    Error *local_err = NULL;
    BlockJob *ret;

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    ret = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return ret;

error_restore_flags:
    /* Ignore errors from the reopen; we want to report the original failure */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}