/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(MirrorOpList, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    CoQueue waiting_requests;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;
110
111static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
112 int error)
113{
114 s->synced = false;
115 s->actively_synced = false;
116 if (read) {
117 return block_job_error_action(&s->common, s->on_source_error,
118 true, error);
119 } else {
120 return block_job_error_action(&s->common, s->on_target_error,
121 false, error);
122 }
123}
124
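/* Wait until no in-flight operation (other than @self, which may be NULL)
 * overlaps the given byte range any more.  Waiting restarts whenever a
 * conflicting operation finishes, until the range is free or the job has
 * failed. */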
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                qemu_co_queue_wait(&op->waiting_requests, NULL);
                break;
            }
        }
    }
}

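/* Tear down a finished operation: return its buffer chunks to the free
 * list, clear its chunks in the in-flight bitmap, account progress on
 * success, and wake up any requests that were waiting on it. */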
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }

    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already covered by ->in_flight_bitmap. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function.  Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_active_write == active) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}
368
369static void coroutine_fn mirror_co_zero(void *opaque)
370{
371 MirrorOp *op = opaque;
372 int ret;
373
374 op->s->in_flight++;
375 op->s->bytes_in_flight += op->bytes;
376 *op->bytes_handled = op->bytes;
377
378 ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
379 op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
380 mirror_write_complete(op, ret);
381}
382
383static void coroutine_fn mirror_co_discard(void *opaque)
384{
385 MirrorOp *op = opaque;
386 int ret;
387
388 op->s->in_flight++;
389 op->s->bytes_in_flight += op->bytes;
390 *op->bytes_handled = op->bytes;
391
392 ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
393 mirror_write_complete(op, ret);
394}
395
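/* Submit a copy, zero or discard operation for [offset, offset + bytes)
 * in a new coroutine, and return the number of bytes the operation will
 * actually handle (which may differ from @bytes due to alignment). */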
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .bytes_handled = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (this calls that function
     * so it's safe to return the value as unsigned) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

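/* One background iteration: pick the next dirty area from the dirty
 * bitmap, extend it over consecutive dirty chunks, and submit copy, zero
 * or discard operations for it.  Returns the delay (in ns) to apply
 * before the next iteration, for rate limiting. */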
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Register a pseudo operation covering the whole area before claiming
     * it in the in-flight bitmap.  Conflicting requests wait on its
     * waiting_requests queue; the real operations submitted by
     * mirror_perform() below each cover only part of the area, so the
     * pseudo operation is what keeps conflicting requests from proceeding
     * until all of them have been submitted. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset = offset,
        .bytes = nb_chunks * s->granularity,
        .is_pseudo_op = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

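/* Slice s->buf into granularity-sized chunks and put them all on the
 * free list. */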
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* Wait until all in-flight background operations have settled.  Active
 * writes are not accounted in s->in_flight, so they do not keep this
 * function from returning.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

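/* Runs in the main loop after mirror_run() finishes: gives up the
 * permissions taken on source and target, optionally replaces the source
 * (or s->to_replace) with the target in the node graph, and finally
 * removes the mirror_top filter node again. */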
static void mirror_exit(Job *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorExitData *data = opaque;
    MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we called
     * job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(bjob);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;
    job_completed(job, data->ret, NULL);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

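/* Main coroutine of the mirror job: copies the bulk of the data, then
 * keeps the target in sync until the job is completed or cancelled. */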
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        s->actively_synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    job_defer_to_main_loop(&s->common.job, mirror_exit, data);
}

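/* Implementation of block-job-complete: makes sure the target has a
 * backing file where one is needed, blocks the node to be replaced, and
 * lets the job coroutine finish the remaining work. */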
static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't be
         * passed to bdrv_op_block_all() */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}

static void mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    return !!s->in_flight;
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

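/* Replicate a guest write (or zero/discard) synchronously to the target
 * in write-blocking (active) mode, covering exactly those parts of the
 * range that are still marked dirty. */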
static void do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                                 uint64_t offset, uint64_t bytes,
                                 QEMUIOVector *qiov, int flags)
{
    BdrvDirtyBitmapIter *iter;
    QEMUIOVector target_qiov;
    uint64_t dirty_offset;
    int dirty_bytes;

    if (qiov) {
        qemu_iovec_init(&target_qiov, qiov->niov);
    }

    iter = bdrv_dirty_iter_new(job->dirty_bitmap);
    bdrv_set_dirty_iter(iter, offset);

    while (true) {
        bool valid_area;
        int ret;

        bdrv_dirty_bitmap_lock(job->dirty_bitmap);
        valid_area = bdrv_dirty_iter_next_area(iter, offset + bytes,
                                               &dirty_offset, &dirty_bytes);
        if (!valid_area) {
            bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
            break;
        }

        bdrv_reset_dirty_bitmap_locked(job->dirty_bitmap,
                                       dirty_offset, dirty_bytes);
        bdrv_dirty_bitmap_unlock(job->dirty_bitmap);

        job_progress_increase_remaining(&job->common.job, dirty_bytes);

        assert(dirty_offset - offset <= SIZE_MAX);
        if (qiov) {
            qemu_iovec_reset(&target_qiov);
            qemu_iovec_concat(&target_qiov, qiov,
                              dirty_offset - offset, dirty_bytes);
        }

        switch (method) {
        case MIRROR_METHOD_COPY:
            ret = blk_co_pwritev(job->target, dirty_offset, dirty_bytes,
                                 qiov ? &target_qiov : NULL, flags);
            break;

        case MIRROR_METHOD_ZERO:
            assert(!qiov);
            ret = blk_co_pwrite_zeroes(job->target, dirty_offset, dirty_bytes,
                                       flags);
            break;

        case MIRROR_METHOD_DISCARD:
            assert(!qiov);
            ret = blk_co_pdiscard(job->target, dirty_offset, dirty_bytes);
            break;

        default:
            abort();
        }

        if (ret >= 0) {
            job_progress_update(&job->common.job, dirty_bytes);
        } else {
            BlockErrorAction action;

            bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_offset, dirty_bytes);
            job->actively_synced = false;

            action = mirror_error_action(job, false, -ret);
            if (action == BLOCK_ERROR_ACTION_REPORT) {
                if (!job->ret) {
                    job->ret = ret;
                }
                break;
            }
        }
    }

    bdrv_dirty_iter_free(iter);
    if (qiov) {
        qemu_iovec_destroy(&target_qiov);
    }
}

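/* Register an active write for [offset, offset + bytes): wait for
 * conflicting in-flight operations and mark the affected chunks in the
 * in-flight bitmap. */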
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .is_active_write = true,
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might modify the buffer while we write it out, but the
         * data must reach source and target identically, so snapshot it
         * into a bounce buffer that is guaranteed to stay unchanged for
         * both writes. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};

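/* Common setup for mirror and active commit jobs: insert the mirror_top
 * filter above @bs, create the block job and its dirty bitmap, and take
 * the necessary permissions on the target and any intermediate nodes. */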
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bs == target) {
        error_setg(errp, "Can't mirror node into itself");
        return;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB.  When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->copy_mode = copy_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        job_early_fail(&s->common.job);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}

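/* Public entry point used by drive-mirror and blockdev-mirror. */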
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}

void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}