/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

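/* Map an I/O error to the action configured by the user (report, ignore or
 * stop the job), depending on whether the source or the target failed.
 * Any error means the target can no longer be considered actively synced. */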
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

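/* Wait until no in-flight operation overlaps the chunks covering
 * [offset, offset + bytes).  @self may be NULL when waiting on behalf of an
 * operation that has not been created yet. */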
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

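/* Called when a copy/zero/discard operation completes: return its buffer
 * chunks to the free list, clear its slots in the in-flight bitmap, update
 * progress, and wake every request that was waiting on it. */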
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may leave align_bytes unaligned to the cluster size at the
     * end of the image; that is fine, the range just must not exceed it. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /*
         * Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function.  Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on.
         */
        if (!op->is_pseudo_op && op->is_in_flight &&
            op->is_active_write == active)
        {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

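/* Dispatch one asynchronous copy, write-zeroes or discard operation for the
 * given range in a new coroutine, and return the number of bytes it handled
 * (which may exceed @bytes if the range was widened for COW alignment). */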
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /*
     * Same assertion as in mirror_co_read()
     * (this and mirror_co_read() calculate the same value)
     */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

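/* Perform one background iteration: pick the next run of dirty chunks,
 * decide per extent whether to copy, zero or discard it on the target, and
 * submit the corresponding operations.  Returns the delay (in ns) that the
 * caller should sleep to honour the rate limit. */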
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it.  mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/*
 * Wait until all in-flight background (i.e. non-active-write) operations
 * have completed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);

        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         */
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

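/* Populate the dirty bitmap for a full/top sync: optionally zero out the
 * target first, then mark dirty every region that is allocated above the
 * base overlay. */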
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
                                      &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

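/* The main job coroutine: size checks, initial bitmap population, then the
 * copy loop that runs until the source is clean and the job is completed or
 * cancelled. */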
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are
                                 only checking for a backing file */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_truncate(s->target, s->bdev_length, false,
                               PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->actively_synced = true;
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If the target has a backing file that is not yet populated and its
     * cluster size is larger than the granularity, copying only part of a
     * cluster would expose stale backing data; track whole-cluster copies
     * in a COW bitmap in that case. */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            need_drain = false;
            break;
        }

        if (job_is_ready(&s->common.job) && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                  delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't be
         * used for this and we can't allow the parents to change from here */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    if (!job->paused) {
        job_enter(job);
    }
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    if (!s->common.job.paused && !job_is_cancelled(&job->job) && !s->in_drain) {
        return true;
    }

    return !!s->in_flight;
}

static bool mirror_cancel(Job *job, bool force)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

    if (force) {
        bdrv_cancel_in_flight(target);
    }
    return force;
}

static bool commit_active_cancel(Job *job, bool force)
{
    /* Same as for mirror_cancel() */
    return force || !job_is_ready(job);
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = mirror_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = commit_active_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

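/* Replicate a guest write (or zero/discard) to the target right away, as
 * used in active (write-blocking) mirror mode.  Unaligned head and tail
 * whose padding is still dirty are trimmed off and left to the background
 * job, so the dirty bitmap stays consistent. */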
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset corresponding bit in
         *    dirty_bitmap as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it we don't diverge mirror
         *    progress.
         *
         * Note, that because of this, guest write may have no contribution
         * into mirror progress. But that's not bad, as we have background
         * process of mirroring. If under some bad circumstances (high guest
         * IO load) background process starve, we will not converge anyway,
         * even if each write will contribute, as guest is not guaranteed to
         * rewrite the whole disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark dirty the whole area, aligned up.
         * Note that we don't care about shrunk tails if any: they were dirty
         * at function start, and they must be still dirty, as we've locked
         * the region for in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}

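/* Register an active write: create its MirrorOp, wait for all conflicting
 * in-flight operations, and claim the affected chunks in the in-flight
 * bitmap.  Must be paired with active_write_settle(). */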
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
        .is_in_flight       = true,
        .co                 = qemu_coroutine_self(),
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent, i.e. we see all of
             * the guest's writes. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

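/* Forward a guest write/zero/discard to the source; in write-blocking copy
 * mode, additionally replicate it to the target before returning. */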
static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     !job_is_cancelled(&s->job->common.job) &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     !job_is_cancelled(&s->job->common.job) &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int64_t bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need and want to
         * forward permissions to the backing node any more, so that
         * it can be detached without our interference.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    if (s->is_commit) {
        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         * (We could also share RESIZE, because it may be needed for
         * the target if its size is less than the top node's; but
         * bdrv_default_perms_for_cow() automatically shares RESIZE
         * for backing nodes if WRITE is shared, so there is no need
         * to do it here.)
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
    }
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,

    .is_filter                  = true,
};

static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_is_backing;
    uint64_t target_perms, target_shared_perms;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    target_is_backing = bdrv_chain_contains(bs, target);

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    bs_opaque->is_commit = target_is_backing;

    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

    if (ret < 0) {
        bdrv_unref(mirror_top_bs);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* The target must be written to for the whole duration of the job;
     * further permissions are decided below depending on the graph. */
    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");
            goto fail;
        }

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");
            goto fail;
        }

        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;
        }

        target_shared_perms |= BLK_PERM_CONSISTENT_READ
                            |  BLK_PERM_WRITE
                            |  BLK_PERM_GRAPH_MOD;
    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
        /*
         * We may want to allow this in the future, but it would
         * require taking some extra care.
         */
        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
                   "source's backing chain");
        goto fail;
    }

    if (backing_mode != MIRROR_LEAVE_BACKING_CHAIN) {
        target_perms |= BLK_PERM_GRAPH_MOD;
    }

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB.  When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }
    if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bdrv_disable_dirty_bitmap(s->dirty_bitmap);
    }

    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other option would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
        {
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
            }

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
            if (ret < 0) {
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

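/*
 * mirror_start() backs the drive-mirror/blockdev-mirror QMP commands.
 * As an illustration only (node names here are hypothetical), a full sync
 * via QMP might look like:
 *
 *   { "execute": "blockdev-mirror",
 *     "arguments": { "job-id": "mirror0", "device": "source-node",
 *                    "target": "target-node", "sync": "full" } }
 */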
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}

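/* Active commit is implemented as a mirror job whose target is the base
 * image: the base is temporarily made writable here, and restored to
 * read-only if job creation fails. */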
BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    BlockJob *job;

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    job = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     errp);
    if (!job) {
        goto error_restore_flags;
    }

    return job;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}