/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "block/block_int-io.h"
#include "block/dirty-bitmap.h"
#include "block/reqlist.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/co-shared-resource.h"
#include "qemu/coroutine.h"
#include "qemu/ratelimit.h"
#include "block/aio_task.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
#define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef enum {
    COPY_READ_WRITE_CLUSTER,
    COPY_READ_WRITE,
    COPY_WRITE_ZEROES,
    COPY_RANGE_SMALL,
    COPY_RANGE_FULL
} BlockCopyMethod;

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
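    /* Fields initialized when the call state is created and never changed */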
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;
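    /* Coroutine where async block-copy is running */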
    Coroutine *co;

    /* Fields whose state changes throughout the execution */
    bool finished; /* atomic */
    QemuCoSleep sleep; /* TODO: protect API with a lock */
    bool cancelled; /* atomic */
    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /*
     * Fields that report information about return values and errors.
     * Protected by lock in BlockCopyState.
     */
    bool error_is_read;
    /*
     * @ret is set concurrently by tasks under mutex. Only set once by the
     * first failed task (and untouched if no task failed).
     * After finishing (call_state->finished is true), it is not modified
     * anymore and may be safely read without the mutex.
     */
    int ret;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    /*
     * Fields initialized in block_copy_task_create()
     * and never changed.
     */
    BlockCopyState *s;
    BlockCopyCallState *call_state;
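    /*
     * @method can also be set again in the while loop of
     * block_copy_dirty_clusters(), but it is never accessed concurrently,
     * because the only other function that reads it is
     * block_copy_task_entry() and it is invoked afterwards in the same
     * coroutine.
     */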
    BlockCopyMethod method;

    /*
     * Generally, req is protected by lock in BlockCopyState. Still, it is
     * only shrunk by its creator before the task starts running, so the
     * running task may read req.offset and req.bytes without the lock.
     */
    BlockReq req;
} BlockCopyTask;

static int64_t task_end(BlockCopyTask *task)
{
    return task->req.offset + task->req.bytes;
}

typedef struct BlockCopyState {
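    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user and the user is responsible for
     * appropriate permissions on these children.
     */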
    BdrvChild *source;
    BdrvChild *target;

    /*
     * Fields initialized in block_copy_state_new()
     * and never changed.
     */
    int64_t cluster_size;
    int64_t max_transfer;
    uint64_t len;
    BdrvRequestFlags write_flags;

    /*
     * Fields whose state changes throughout the execution.
     * Protected by lock.
     */
    CoMutex lock;
    int64_t in_flight_bytes;
    BlockCopyMethod method;
    BlockReqList reqs;
    QLIST_HEAD(, BlockCopyCallState) calls;
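    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the
     * bitmap is thus not fully initialized: it may still have bits set for
     * areas that are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */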
    bool skip_unallocated; /* atomic */

    BdrvDirtyBitmap *copy_bitmap;
    ProgressMeter *progress;
    SharedResource *mem;
    RateLimit rate_limit;
} BlockCopyState;

/* Called with lock held */
static int64_t block_copy_chunk_size(BlockCopyState *s)
{
    switch (s->method) {
    case COPY_READ_WRITE_CLUSTER:
        return s->cluster_size;
    case COPY_READ_WRITE:
    case COPY_RANGE_SMALL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
                   s->max_transfer);
    case COPY_RANGE_FULL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                   s->max_transfer);
    default:
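        /* Cannot happen */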
        abort();
    }
}

/*
 * Search for the first dirty area in offset/bytes range and create task at
 * the beginning of it.
 */
static coroutine_fn BlockCopyTask *
block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
                       int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk;

    QEMU_LOCK_GUARD(&s->lock);
    max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* region is dirty, so no existent tasks possible in it */
    assert(!reqlist_find_conflict(&s->reqs, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .method = s->method,
    };
    reqlist_init_req(&s->reqs, &task->req, offset, bytes);

    return task;
}

/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task to be handled later. Set the dirty bits back
 * and shrink the tracked request accordingly. Must only be called before
 * the task has started running.
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    if (new_bytes == task->req.bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->req.bytes);

    task->s->in_flight_bytes -= task->req.bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->req.offset + new_bytes,
                          task->req.bytes - new_bytes);

    reqlist_shrink_req(&task->req, new_bytes);
}

static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    task->s->in_flight_bytes -= task->req.bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->req.offset,
                              task->req.bytes);
    }
    if (task->s->progress) {
        progress_set_remaining(task->s->progress,
                               bdrv_get_dirty_count(task->s->copy_bitmap) +
                               task->s->in_flight_bytes);
    }
    reqlist_remove_req(&task->req);
}

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}

static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}

void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
                              bool compress)
{
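    /* Keep BDRV_REQ_SERIALISING set (or not set) by block_copy_state_new() */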
    s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) |
                     (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    if (s->max_transfer < s->cluster_size) {
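        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than block-copy cluster size, so fallback to
         * buffered copying (read and write respect max_transfer on their
         * way).
         */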
        s->method = COPY_READ_WRITE_CLUSTER;
    } else if (compress) {
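        /* Compression supports only cluster-size writes and no copy-range */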
        s->method = COPY_READ_WRITE_CLUSTER;
    } else {
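        /*
         * If copy range is enabled, start with COPY_RANGE_SMALL, until the
         * first successful copy_range (look at block_copy_do_copy).
         */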
        s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
    }
}

static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
                                                 Error **errp)
{
    int ret;
    BlockDriverInfo bdi;
    bool target_does_cow = bdrv_backing_chain_next(target);

    /*
     * If there is no backing file on the target, we cannot rely on COW if
     * our backup cluster size is smaller than the target cluster size. Even
     * for targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target_does_cow) {
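        /* Cluster size is not defined */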
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target_does_cow) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target_does_cow) {
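        /* Not fatal; just trudge on ahead */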
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}

BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     const BdrvDirtyBitmap *bitmap,
                                     Error **errp)
{
    ERRP_GUARD();
    BlockCopyState *s;
    int64_t cluster_size;
    BdrvDirtyBitmap *copy_bitmap;
    bool is_fleecing;

    cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
    if (cluster_size < 0) {
        return NULL;
    }

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);
    if (bitmap) {
        if (!bdrv_merge_dirty_bitmap(copy_bitmap, bitmap, NULL, errp)) {
            error_prepend(errp, "Failed to merge bitmap '%s' to internal "
                          "copy-bitmap: ", bdrv_dirty_bitmap_name(bitmap));
            bdrv_release_dirty_bitmap(copy_bitmap);
            return NULL;
        }
    } else {
        bdrv_set_dirty_bitmap(copy_bitmap, 0,
                              bdrv_dirty_bitmap_size(copy_bitmap));
    }

    /*
     * If source is in the backing chain of target, assume that target is
     * going to be used for "image fleecing", i.e. it should represent a kind
     * of snapshot of source at backup-start point in time. And target is
     * going to be read by somebody (for example, used as NBD export) during
     * the backup job.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from target,
     * otherwise, reading from target, we may occasionally read data already
     * updated by the guest.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    is_fleecing = bdrv_chain_contains(target->bs, source->bs);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0),
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
        .max_transfer = QEMU_ALIGN_DOWN(
                                    block_copy_max_transfer(source, target),
                                    cluster_size),
    };

    block_copy_set_copy_opts(s, false, false);

    ratelimit_init(&s->rate_limit);
    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->reqs);
    QLIST_INIT(&s->calls);

    return s;
}

/* Only set before running the job, no need for locking */
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}

/*
 * Takes ownership of @task.
 *
 * If pool is NULL, run the task directly; otherwise schedule it into the
 * pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->req.bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Do copy of cluster-aligned chunk. Requested region is allowed to exceed
 * s->len only to cover last cluster when s->len is not aligned to clusters.
 *
 * No sync here: neither bitmap nor intersecting requests handling, only copy.
 *
 * @method is an in-out argument, so that copy_range can be either extended
 * to a full-size buffer or disabled if the copy_range attempt fails. The
 * output value of @method should be used for subsequent tasks.
 * Returns 0 on success.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_do_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                   BlockCopyMethod *method, bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    switch (*method) {
    case COPY_WRITE_ZEROES:
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;

    case COPY_RANGE_SMALL:
    case COPY_RANGE_FULL:
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret >= 0) {
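            /* Successful copy-range, increase chunk size */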
            *method = COPY_RANGE_FULL;
            return 0;
        }

        trace_block_copy_copy_range_fail(s, offset, ret);
        *method = COPY_READ_WRITE;
        /* Fall through to read+write with allocated buffer */

    case COPY_READ_WRITE_CLUSTER:
    case COPY_READ_WRITE:
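        /*
         * In case of a failed copy_range request above, we may proceed with
         * a buffered request larger than BLOCK_COPY_MAX_BUFFER.
         * Still, further requests will be properly limited, so don't care
         * too much. Moreover, the most likely case (copy_range is
         * unsupported for the configuration, so the very first copy_range
         * request fails) is handled by setting the large copy_size only
         * after the first successful copy_range.
         */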
        bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

        ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
        if (ret < 0) {
            trace_block_copy_read_fail(s, offset, ret);
            *error_is_read = true;
            goto out;
        }

        ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                             s->write_flags);
        if (ret < 0) {
            trace_block_copy_write_fail(s, offset, ret);
            *error_is_read = false;
            goto out;
        }

    out:
        qemu_vfree(bounce_buffer);
        break;

    default:
        abort();
    }

    return ret;
}

static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    BlockCopyState *s = t->s;
    bool error_is_read = false;
    BlockCopyMethod method = t->method;
    int ret;

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method,
                                 &error_is_read);
    }

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (s->method == t->method) {
            s->method = method;
        }

        if (ret < 0) {
            if (!t->call_state->ret) {
                t->call_state->ret = ret;
                t->call_state->error_is_read = error_is_read;
            }
        } else if (s->progress) {
            progress_work_done(s->progress, t->req.bytes);
        }
    }
    co_put_to_shres(s->mem, t->req.bytes);
    block_copy_task_end(t, ret);

    return ret;
}

static coroutine_fn GRAPH_RDLOCK
int block_copy_block_status(BlockCopyState *s, int64_t offset, int64_t bytes,
                            int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (qatomic_read(&s->skip_unallocated)) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_co_block_status_above(s->source->bs, base, offset, bytes, &num,
                                     NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
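        /*
         * On error, or if we failed to obtain a large enough chunk, just
         * fall back to copying one cluster.
         */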
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}

/*
 * Check if the cluster starting at offset is allocated or not.
 * return via pnum the number of contiguous clusters sharing this allocation.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
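        /* Query the allocation status of the next contiguous chunk */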
        ret = bdrv_co_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
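            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: an unallocated tail is rounded up to whole
             * clusters.
             */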
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes)
{
    QEMU_LOCK_GUARD(&s->lock);

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    if (s->progress) {
        progress_set_remaining(s->progress,
                               bdrv_get_dirty_count(s->copy_bitmap) +
                               s->in_flight_bytes);
    }
}

/*
 * Reset bits in copy_bitmap which correspond to unallocated clusters.
 *
 * Returns the allocation status of the first cluster at @offset and sets
 * @count to the number of bytes (aligned to cluster size) sharing that
 * status.
 */
int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
                                                  int64_t offset,
                                                  int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        block_copy_reset(s, offset, bytes);
    }

    *count = bytes;
    return ret;
}

/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in @offset/@bytes range.
 * Returns 1 if dirty clusters found and successfully copied, 0 if no dirty
 * clusters found and -errno on failure.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping source and target in
     * the same aio context.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 &&
           !qatomic_read(&call_state->cancelled)) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
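            /* No more dirty bits in the requested range */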
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->req.offset > offset) {
            trace_block_copy_skip_range(s, offset, task->req.offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->req.offset, task->req.bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fail */
        if (status_bytes < task->req.bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (qatomic_read(&s->skip_unallocated) &&
            !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            trace_block_copy_skip_range(s, task->req.offset, task->req.bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->method = COPY_WRITE_ZEROES;
        }

        if (!call_state->ignore_ratelimit) {
            uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
            if (ns > 0) {
                block_copy_task_end(task, -EAGAIN);
                g_free(task);
                qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                          QEMU_CLOCK_REALTIME, ns);
                continue;
            }
        }

        ratelimit_calculate_delay(&s->rate_limit, task->req.bytes);

        trace_block_copy_process(s, task->req.offset);

        co_get_from_shres(s->mem, task->req.bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task already
         * failed for a real reason, so let's return the first failure.
         * Still, assert that we don't rely on -ECANCELED, as it would be
         * wrong to just break the loop on it.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}

void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}

/*
 * block_copy_common
 *
 * Copy requested region, accordingly to dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed it will
 * help us. If they fail, we will retry not-copied regions. So, if we return
 * an error, it means that some I/O operation failed in the context of
 * _this_ block_copy call, not some parallel operation.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_common(BlockCopyCallState *call_state)
{
    int ret;
    BlockCopyState *s = call_state->s;

    qemu_co_mutex_lock(&s->lock);
    QLIST_INSERT_HEAD(&s->calls, call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
            WITH_QEMU_LOCK_GUARD(&s->lock) {
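                /*
                 * Check that there is no task we still need to
                 * wait to complete
                 */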
                ret = reqlist_wait_one(&s->reqs, call_state->offset,
                                       call_state->bytes, &s->lock);
                if (ret == 0) {
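                    /*
                     * No pending tasks, but check again the bitmap in this
                     * same critical section, since a task might have failed
                     * between this and the critical section in
                     * block_copy_dirty_clusters().
                     *
                     * reqlist_wait_one return value 0 also means that it
                     * didn't release the lock. So, we are still in the same
                     * critical section, not interrupted by any concurrent
                     * access to state.
                     */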
                    ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
                                                       call_state->offset,
                                                       call_state->bytes) >= 0;
                }
            }
        }

        /*
         * We retry in two cases:
         * 1. Some progress done
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed
         *    parallel block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !qatomic_read(&call_state->cancelled));

    qatomic_store_release(&call_state->finished, true);

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    qemu_co_mutex_lock(&s->lock);
    QLIST_REMOVE(call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}

static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    GRAPH_RDLOCK_GUARD();
    block_copy_common(opaque);
}

int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit, uint64_t timeout_ns,
                            BlockCopyAsyncCallbackFunc cb,
                            void *cb_opaque)
{
    int ret;
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
        .cb = cb,
        .cb_opaque = cb_opaque,
    };

    ret = qemu_co_timeout(block_copy_async_co_entry, call_state, timeout_ns,
                          g_free);
    if (ret < 0) {
        assert(ret == -ETIMEDOUT);
        block_copy_call_cancel(call_state);
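        /* call_state will be freed by the running coroutine */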
        return ret;
    }

    ret = call_state->ret;
    g_free(call_state);

    return ret;
}

BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}

void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(qatomic_read(&call_state->finished));
    g_free(call_state);
}

bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->finished);
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->cancelled);
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(qatomic_load_acquire(&call_state->finished));
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}

/*
 * Note that cancelling is asynchronous: the call is only marked as cancelled
 * and its coroutine is woken up; the caller still has to wait for the call
 * to actually finish (see block_copy_call_finished()).
 */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    qatomic_set(&call_state->cancelled, true);
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

int64_t block_copy_cluster_size(BlockCopyState *s)
{
    return s->cluster_size;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    qatomic_set(&s->skip_unallocated, skip);
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);

    /*
     * Note: it's good to kick all call states from here, but it should be
     * done only from a coroutine, to not crash if s->calls list changed
     * while entering one call. So for now, the only user of this function
     * kicks its only one call_state by hand.
     */
}