/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qemu/id.h"
#include "qemu/option.h"
#include "trace.h"
#include "migration/misc.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

typedef struct BlockBackendAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);
    void *opaque;
    QLIST_ENTRY(BlockBackendAioNotifier) list;
} BlockBackendAioNotifier;

struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    void *dev;                  /* attached device model, if any */
    bool legacy_dev;            /* true if dev is not a DeviceState */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    uint64_t perm;
    uint64_t shared_perm;
    bool disable_perm;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
    QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;

    int quiesce_counter;
    VMChangeStateEntry *vmsh;
    bool force_allow_inactivate;

    /* Number of in-flight aio requests.  BlockDriverState also counts
     * in-flight requests but aio requests can exist even when blk_bs(blk)
     * returns NULL (e.g. when called during bdrv_drain_all_begin()).
     *
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
    AioWait wait;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor and which are iterated through
 * by blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);

static char *blk_root_get_parent_desc(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    char *dev_id;

    if (blk->name) {
        return g_strdup(blk->name);
    }

    dev_id = blk_get_attached_dev_id(blk);
    if (*dev_id) {
        return dev_id;
    } else {
        /* TODO Callback into the BB owner for something more detailed */
        g_free(dev_id);
        return g_strdup("a block device");
    }
}

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}

static void blk_vm_state_changed(void *opaque, int running, RunState state)
{
    Error *local_err = NULL;
    BlockBackend *blk = opaque;

    if (state == RUN_STATE_INMIGRATE) {
        return;
    }

    qemu_del_vm_change_state_handler(blk->vmsh);
    blk->vmsh = NULL;
    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}

/*
 * Notifies the user of the BlockBackend that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
 * shared write permissions that we needed for storage migration).
 *
 * If an error is returned, the VM cannot be allowed to be resumed.
 */
static void blk_root_activate(BdrvChild *child, Error **errp)
{
    BlockBackend *blk = child->opaque;
    Error *local_err = NULL;

    if (!blk->disable_perm) {
        return;
    }

    blk->disable_perm = false;

    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Activation can happen when migration process is still active, for
         * example when nbd_server_add is called during non-shared storage
         * migration. Defer the shared_perm update to migration completion. */
        if (!blk->vmsh) {
            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
                                                         blk);
        }
        return;
    }

    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }
}

void blk_set_force_allow_inactivate(BlockBackend *blk)
{
    blk->force_allow_inactivate = true;
}

static bool blk_can_inactivate(BlockBackend *blk)
{
    /* If it is a guest device, inactivate is ok. */
    if (blk->dev || blk_name(blk)[0]) {
        return true;
    }

    /* Inactivating means no more writes to the image can be done,
     * even if those writes would be changes invisible to the
     * guest.  For block job BBs that satisfy this, we can just allow
     * it.  This is the case for mirror job source, which is required
     * by libvirt non-shared block migration. */
    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
        return true;
    }

    return blk->force_allow_inactivate;
}

static int blk_root_inactivate(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->disable_perm) {
        return 0;
    }

    if (!blk_can_inactivate(blk)) {
        return -EPERM;
    }

    blk->disable_perm = true;
    if (blk->root) {
        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
    }

    return 0;
}

static void blk_root_attach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_attach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_add_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static void blk_root_detach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_detach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_remove_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static const BdrvChildRole child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,
    .get_parent_desc    = blk_root_get_parent_desc,

    .drained_begin      = blk_root_drained_begin,
    .drained_end        = blk_root_drained_end,

    .activate           = blk_root_activate,
    .inactivate         = blk_root_inactivate,

    .attach             = blk_root_attach,
    .detach             = blk_root_detach,
};

/*
 * Create a new BlockBackend with a reference count of one.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the
 * permissions to request for a block driver node that is attached to this
 * BlockBackend.  @shared_perm is a bitmask which describes which
 * permissions may be granted to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk->perm = perm;
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);

    block_acct_init(&blk->stats);

    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QLIST_INIT(&blk->aio_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
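
/*
 * Usage sketch (illustrative, not part of the original file; the permission
 * choices and the existing node @bs are assumptions): create an anonymous
 * backend that wants consistent reads and writes, tolerates any sharing, and
 * attach an existing node with blk_insert_bs():
 *
 *     BlockBackend *blk = blk_new(BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
 *                                 BLK_PERM_ALL);
 *     Error *local_err = NULL;
 *
 *     if (blk_insert_bs(blk, bs, &local_err) < 0) {
 *         error_report_err(local_err);
 *         blk_unref(blk);
 *     }
 */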

/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags, and do the work in the callers instead.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm = 0;

    /* blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a concern because the BDS stays private, so we
     * just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share. */
    if ((flags & BDRV_O_NO_IO) == 0) {
        perm |= BLK_PERM_CONSISTENT_READ;
        if (flags & BDRV_O_RDWR) {
            perm |= BLK_PERM_WRITE;
        }
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }

    blk = blk_new(perm, BLK_PERM_ALL);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       perm, BLK_PERM_ALL, blk, errp);
    if (!blk->root) {
        bdrv_unref(bs);
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
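
/*
 * Usage sketch (illustrative, not part of the original file; the image path
 * is an assumption): open an image read-write and release it again:
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("/tmp/test.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ... do I/O with blk_pread() / blk_pwrite() ...
 *     blk_unref(blk);
 */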

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->public.throttle_group_member.throttle_state) {
        blk_io_limits_disable(blk);
    }
    if (blk->root) {
        blk_remove_bs(blk);
    }
    if (blk->vmsh) {
        qemu_del_vm_change_state_handler(blk->vmsh);
        blk->vmsh = NULL;
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->aio_notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even
 * the ones which are hidden (i.e. are not referenced by the monitor).
 */
BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}

/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs, *old_bs;

    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one attached to it.
     */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        BlockBackend *old_blk = it->blk;

        old_bs = old_blk ? blk_bs(old_blk) : NULL;

        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (it->blk) {
            blk_ref(it->blk);
        }
        blk_unref(old_blk);

        if (bs) {
            bdrv_ref(bs);
            bdrv_unref(old_bs);
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    } else {
        old_bs = it->bs;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    if (bs) {
        bdrv_ref(bs);
    }
    bdrv_unref(old_bs);

    return bs;
}

static void bdrv_next_reset(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    bdrv_next_reset(it);
    return bdrv_next(it);
}

/* Must be called when aborting a bdrv_next() iteration before
 * bdrv_next() returns NULL */
void bdrv_next_cleanup(BdrvNextIterator *it)
{
    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        if (it->blk) {
            bdrv_unref(blk_bs(it->blk));
            blk_unref(it->blk);
        }
    } else {
        bdrv_unref(it->bs);
    }

    bdrv_next_reset(it);
}
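
/*
 * Usage sketch (illustrative, not part of the original file): walking every
 * top-level BlockDriverState with the bdrv_first()/bdrv_next() iterator; the
 * loop body is an assumption:
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         printf("node: %s\n", bdrv_get_node_name(bs));
 *     }
 *     bdrv_next_cleanup() would only be needed if the loop exited early.
 */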

/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(const BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;
    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->role == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}

/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role != &child_root) {
            return false;
        }
    }

    return true;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    BlockDriverState *bs;

    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (tgm->throttle_state) {
        bs = blk_bs(blk);
        bdrv_drained_begin(bs);
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
        bdrv_drained_end(bs);
    }

    blk_update_root_state(blk);

    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}

/*
 * Associates a new BlockDriverState with @blk.
 */
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       blk->perm, blk->shared_perm, blk, errp);
    if (blk->root == NULL) {
        return -EPERM;
    }
    bdrv_ref(bs);

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (tgm->throttle_state) {
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
    }

    return 0;
}

/*
 * Sets the permission bitmasks that the user of the BlockBackend needs.
 */
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
                 Error **errp)
{
    int ret;

    if (blk->root && !blk->disable_perm) {
        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
        if (ret < 0) {
            return ret;
        }
    }

    blk->perm = perm;
    blk->shared_perm = shared_perm;

    return 0;
}

void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
    *perm = blk->perm;
    *shared_perm = blk->shared_perm;
}
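
/*
 * Usage sketch (illustrative, not part of the original file): widening a
 * backend's permissions at runtime, e.g. to request BLK_PERM_RESIZE before
 * a truncate; the error handling is an assumption:
 *
 *     uint64_t perm, shared;
 *     Error *local_err = NULL;
 *
 *     blk_get_perm(blk, &perm, &shared);
 *     if (blk_set_perm(blk, perm | BLK_PERM_RESIZE, shared, &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */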

static int blk_do_attach_dev(BlockBackend *blk, void *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }

    /* While migration is still incoming, we don't need to apply the
     * permissions of guest device BlockBackends. We might still have a block
     * job or NBD server writing to the image for storage migration. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        blk->disable_perm = true;
    }

    blk_ref(blk);
    blk->dev = dev;
    blk->legacy_dev = false;
    blk_iostatus_reset(blk);

    return 0;
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    return blk_do_attach_dev(blk, dev);
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_legacy(BlockBackend *blk, void *dev)
{
    if (blk_do_attach_dev(blk, dev) < 0) {
        abort();
    }
    blk->legacy_dev = true;
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev;

    assert(!blk->legacy_dev);
    dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }
    return object_get_canonical_path(OBJECT(dev));
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    /* Only qdevified devices use dev_ops; the legacy attach interface must
     * not be mixed with this one. */
    assert(!blk->legacy_dev);

    blk->dev_ops = ops;
    blk->dev_opaque = opaque;

    /* Are we currently quiesced? Should we enforce this right now? */
    if (blk->quiesce_counter && ops->drained_begin) {
        ops->drained_begin(opaque);
    }
}
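
/*
 * Usage sketch (illustrative, not part of the original file): a device model
 * registering callbacks so the block layer can notify it of drain events;
 * my_drained_begin, my_drained_end and my_device are hypothetical:
 *
 *     static const BlockDevOps my_dev_ops = {
 *         .drained_begin = my_drained_begin,
 *         .drained_end   = my_drained_end,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_dev_ops, my_device);
 */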

/*
 * Notify @blk's attached device model of media change.
 *
 * If @load is true, notify of media load. This action can fail, meaning that
 * the medium cannot be loaded. @errp is set then.
 *
 * If @load is false, notify of media eject. This can never fail.
 *
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;
        Error *local_err = NULL;

        assert(!blk->legacy_dev);

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
        if (local_err) {
            /* Errors here are only possible on load */
            assert(load == true);
            error_propagate(errp, local_err);
            return;
        }
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open,
                                              &error_abort);
            g_free(id);
        }
    }
}

static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load, NULL);
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}

int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_preadv(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, false);
    }

    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    void *iobuf;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size,
                              qiov, rwco->flags);
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size,
                               qiov, rwco->flags);
}

static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov;
    BlkRwCo rwco;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        co_entry(&rwco);
    } else {
        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
        bdrv_coroutine_enter(blk_bs(blk), co);
        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}

int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                          int count)
{
    int ret;

    ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    blk_root_drained_begin(blk->root);
    ret = blk_pread(blk, offset, buf, count);
    blk_root_drained_end(blk->root);
    return ret;
}

int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int bytes, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    return bdrv_make_zero(blk->root, flags);
}

static void blk_inc_in_flight(BlockBackend *blk)
{
    atomic_inc(&blk->in_flight);
}

static void blk_dec_in_flight(BlockBackend *blk)
{
    atomic_dec(&blk->in_flight);
    aio_wait_kick(&blk->wait);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;

    blk_dec_in_flight(acb->blk);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size = sizeof(BlkAioEmAIOCB),
};

static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        blk_dec_in_flight(acb->rwco.blk);
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    assert(acb->has_returned);
    blk_aio_complete(acb);
}

static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                void *iobuf, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = iobuf,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    bdrv_coroutine_enter(blk_bs(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        aio_bh_schedule_oneshot(blk_get_aio_context(blk),
                                blk_aio_complete_bh, acb);
    }

    return &acb->common;
}

static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(!qiov || qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               qiov, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}
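
/*
 * Usage sketch (illustrative, not part of the original file): synchronous
 * I/O through the backend; the buffer size and offsets are assumptions.
 * Both helpers return the byte count on success and a negative errno on
 * failure:
 *
 *     uint8_t buf[512];
 *     int ret = blk_pread(blk, 0, buf, sizeof(buf));
 *
 *     if (ret < 0) {
 *         error_report("read failed: %s", strerror(-ret));
 *     } else {
 *         ret = blk_pwrite(blk, 4096, buf, sizeof(buf), 0);
 *     }
 */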

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}

BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
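
/*
 * Usage sketch (illustrative, not part of the original file): an async read
 * with a completion callback; my_read_done, my_state and buf are
 * hypothetical:
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         if (ret < 0) {
 *             error_report("aio read failed: %s", strerror(-ret));
 *         }
 *     }
 *
 *     QEMUIOVector qiov;
 *     qemu_iovec_init(&qiov, 1);
 *     qemu_iovec_add(&qiov, buf, 512);
 *     blk_aio_preadv(blk, 0, &qiov, 0, my_read_done, my_state);
 */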

static void blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_flush(rwco->blk);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}

static void blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int bytes,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}

static void blk_ioctl_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                             qiov->iov[0].iov_base);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
}

static void blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf);

    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
}

int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    int ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk_bs(blk), offset, bytes);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

static void blk_flush_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_flush(rwco->blk);
}

int blk_flush(BlockBackend *blk)
{
    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}

void blk_drain(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_drained_begin(bs);
    }

    /* We may have -ENOMEDIUM completions in flight */
    AIO_WAIT_WHILE(&blk->wait,
            blk_get_aio_context(blk),
            atomic_mb_read(&blk->in_flight) > 0);

    if (bs) {
        bdrv_drained_end(bs);
    }
}
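
/*
 * Usage sketch (illustrative, not part of the original file): quiescing a
 * backend before reconfiguring it, so no request is in flight while the
 * change happens:
 *
 *     blk_drain(blk);
 *     // safe point: all requests submitted through blk have completed
 *     blk_set_enable_write_cache(blk, false);
 */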

void blk_drain_all(void)
{
    BlockBackend *blk = NULL;

    bdrv_drain_all_begin();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);

        /* We may have -ENOMEDIUM completions in flight */
        AIO_WAIT_WHILE(&blk->wait, ctx,
                atomic_mb_read(&blk->in_flight) > 0);

        aio_context_release(ctx);
    }

    bdrv_drain_all_end();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}
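
/*
 * Usage sketch (illustrative, not part of the original file): how a device
 * model typically maps a failed request to a policy decision and reports it;
 * the completion context and @ret being a negative errno are assumptions:
 *
 *     static void my_complete(BlockBackend *blk, int ret, bool is_read)
 *     {
 *         BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *
 *         blk_error_action(blk, action, is_read, -ret);
 *         if (action == BLOCK_ERROR_ACTION_IGNORE) {
 *             // complete the request towards the guest anyway
 *         }
 *     }
 */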

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;
    BlockDriverState *bs = blk_bs(blk);

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), !!bs,
                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows about
 * the error, it does not know what policy the guest device follows, nor
 * whether the request will be retried. */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

int blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return 0;
    }

    return bdrv_is_sg(bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    /* blk_eject is only called by qdevified devices */
    assert(!blk->legacy_dev);

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }

    /* Whether or not we ejected on the backend,
     * the frontend experienced a tray event. */
    id = blk_get_attached_dev_id(blk);
    qapi_event_send_device_tray_moved(blk_name(blk), id,
                                      eject_flag, &error_abort);
    g_free(id);
}

int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = 0;

    if (bs) {
        max = bs->bl.max_transfer;
    }
    return MIN_NON_ZERO(max, INT_MAX);
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_aio_context(bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;

    if (bs) {
        if (tgm->throttle_state) {
            bdrv_drained_begin(bs);
            throttle_group_detach_aio_context(tgm);
            throttle_group_attach_aio_context(tgm, new_context);
            bdrv_drained_end(bs);
        }
        bdrv_set_aio_context(bs, new_context);
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    notifier = g_new(BlockBackendAioNotifier, 1);
    notifier->attached_aio_context = attached_aio_context;
    notifier->detach_aio_context = detach_aio_context;
    notifier->opaque = opaque;
    QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}
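
/*
 * Usage sketch (illustrative, not part of the original file): a backend user
 * that caches the AioContext can keep it up to date with a notifier pair;
 * my_attach, my_detach and my_state are hypothetical:
 *
 *     static void my_attach(AioContext *ctx, void *opaque) { ... }
 *     static void my_detach(void *opaque) { ... }
 *
 *     blk_add_aio_context_notifier(blk, my_attach, my_detach, my_state);
 *     ...
 *     blk_remove_aio_context_notifier(blk, my_attach, my_detach, my_state);
 */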

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        if (notifier->attached_aio_context == attached_aio_context &&
            notifier->detach_aio_context == detach_aio_context &&
            notifier->opaque == opaque) {
            QLIST_REMOVE(notifier, list);
            g_free(notifier);
            return;
        }
    }

    abort();
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int bytes, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, bytes, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                          int count)
{
    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                   BDRV_REQ_WRITE_COMPRESSED);
}

int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc,
                 Error **errp)
{
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->root, offset, prealloc, errp);
}
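
/*
 * Usage sketch (illustrative, not part of the original file): growing an
 * image to 1 MiB without preallocation; the caller must hold the
 * BLK_PERM_RESIZE permission for this to succeed:
 *
 *     Error *local_err = NULL;
 *
 *     if (blk_truncate(blk, 1048576, PREALLOC_MODE_OFF, &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */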

static void blk_pdiscard_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
}

int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}

/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}

/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(&blk->public.throttle_group_member, cfg);
}

void blk_io_limits_disable(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    assert(tgm->throttle_state);
    if (bs) {
        bdrv_drained_begin(bs);
    }
    throttle_group_unregister_tgm(tgm);
    if (bs) {
        bdrv_drained_end(bs);
    }
}

/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_group_member.throttle_state);
    throttle_group_register_tgm(&blk->public.throttle_group_member,
                                group, blk_get_aio_context(blk));
}

void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_group_member.throttle_state) {
        return;
    }

    /* this BB is a part of the same group than the one we want */
    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
                   group)) {
        return;
    }

    /* need to change the group this bs belong to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
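
/*
 * Usage sketch (illustrative, not part of the original file): enabling
 * throttling on a backend; the group name and the 100 MB/s total-bandwidth
 * limit are assumptions:
 *
 *     ThrottleConfig cfg;
 *
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 100 * 1000 * 1000;
 *
 *     blk_io_limits_enable(blk, "my-group");
 *     blk_set_io_limits(blk, &cfg);
 */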

static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (++blk->quiesce_counter == 1) {
        if (blk->dev_ops && blk->dev_ops->drained_begin) {
            blk->dev_ops->drained_begin(blk->dev_opaque);
        }
    }

    /* Temporarily lift I/O throttling while drained: queued throttled
     * requests must be restarted so they can complete before the drained
     * section ends. */
    if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) {
        throttle_group_restart_tgm(&blk->public.throttle_group_member);
    }
}

static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);

    assert(blk->public.throttle_group_member.io_limits_disabled);
    atomic_dec(&blk->public.throttle_group_member.io_limits_disabled);

    if (--blk->quiesce_counter == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {
            blk->dev_ops->drained_end(blk->dev_opaque);
        }
    }
}

void blk_register_buf(BlockBackend *blk, void *host, size_t size)
{
    bdrv_register_buf(blk_bs(blk), host, size);
}

void blk_unregister_buf(BlockBackend *blk, void *host)
{
    bdrv_unregister_buf(blk_bs(blk), host);
}