/*
 * QEMU Block backends
 */
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"
#include "qemu/id.h"

#define COROUTINE_POOL_RESERVATION 64

/* Marks an emulated synchronous request that has not completed yet */
#define NOT_DONE 0x7fffffff

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */

    void *dev;                  /* attached device model, if any */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* Options of a removed BlockDriverState, kept so they can be applied
     * again when a new one is inserted */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O statistics */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor; iterated with blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}

static const BdrvChildRole child_root = {
    .inherit_options = blk_root_inherit_options,
};

/*
 * Create a new BlockBackend with refcnt set to 1.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(Error **errp)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->root = bdrv_root_attach_child(bs, "root", &child_root);
    bs->blk = blk;
    return blk;
}

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference
 * to @options belongs to the block layer (even on failure).
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->root->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    blk_set_enable_write_cache(blk, true);

    return blk;
}
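
/*
 * Illustrative sketch (not part of the original file): a caller could open an
 * image through the BlockBackend layer roughly like this.  The filename,
 * driver name and flags below are assumptions made up for the example.
 *
 *     QDict *options = qdict_new();
 *     Error *local_err = NULL;
 *     BlockBackend *blk;
 *
 *     qdict_put(options, "driver", qstring_from_str("qcow2"));
 *     blk = blk_new_open("/tmp/test.qcow2", NULL, options,
 *                        BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 *
 * On success the caller owns one reference and eventually drops it with
 * blk_unref(); @options is consumed by the block layer either way.
 */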

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->root) {
        blk_remove_bs(blk);
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    if (blk->root_state.throttle_state) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even
 * the ones which are not referenced by the monitor.
 */
static BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}

/*
 * Return the root BlockDriverState of the BlockBackend after the one owning
 * @bs, or of the first BlockBackend if @bs is null.  Backends without a root
 * are skipped; return null when there are no more.
 */
BlockDriverState *blk_next_root_bs(BlockDriverState *bs)
{
    BlockBackend *blk;

    if (bs) {
        assert(bs->blk);
        blk = bs->blk;
    } else {
        blk = NULL;
    }

    do {
        blk = blk_all_next(blk);
    } while (blk && !blk->root);

    return blk ? blk->root->bs : NULL;
}

/*
 * Add a BlockBackend into the list of backends referenced by the monitor,
 * with the given @name acting as the handle for the monitor.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure.  In the latter case, an
 * Error object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may own the same DriveInfo.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    assert(blk->root->bs->blk == blk);

    notifier_list_notify(&blk->remove_bs_notifiers, blk);

    blk_update_root_state(blk);

    blk->root->bs->blk = NULL;
    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}

/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    assert(!blk->root && !bs->blk);
    bdrv_ref(bs);
    blk->root = bdrv_root_attach_child(bs, "root", &child_root);
    bs->blk = blk;

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * Abort on failure.
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}
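
/*
 * Illustrative sketch (not part of the original file): a device model would
 * typically register its callbacks right after attaching itself.  The
 * callback names and the my_dev pointer are hypothetical.
 *
 *     static const BlockDevOps my_dev_block_ops = {
 *         .change_media_cb  = my_dev_change_media_cb,
 *         .eject_request_cb = my_dev_eject_request_cb,
 *         .is_tray_open     = my_dev_is_tray_open,
 *         .resize_cb        = my_dev_resize_cb,
 *     };
 *
 *     blk_attach_dev_nofail(blk, my_dev);
 *     blk_set_dev_ops(blk, &my_dev_block_ops, my_dev);
 *
 * Callbacks the device does not need can be left NULL; every caller below
 * checks for NULL before invoking them.
 */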

/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load; else, of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
                                              &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only meaningful when the error policy can stop the VM,
 * i.e. "stop" or "enospc" */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}
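
/*
 * Worked example of the bounds check above (numbers are made up): with a
 * 1 MiB image (len == 1048576) and allow_write_beyond_eof clear, a request
 * with offset == 1048064 and size == 1024 is rejected because
 * len - offset == 512 < size, while the same offset with size == 512 is
 * accepted.
 */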

static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}

static int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                                      unsigned int bytes, QEMUIOVector *qiov,
                                      BdrvRequestFlags flags)
{
    int ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_do_preadv(blk_bs(blk), offset, bytes, qiov, flags);
}

static int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                       unsigned int bytes, QEMUIOVector *qiov,
                                       BdrvRequestFlags flags)
{
    int ret;

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    return bdrv_co_do_pwritev(blk_bs(blk), offset, bytes, qiov, flags);
}

typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    QEMUIOVector *qiov;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
                              rwco->qiov, rwco->flags);
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size,
                               rwco->qiov, rwco->flags);
}

/* Synchronous wrapper: run @co_entry in a coroutine and poll the
 * BlockBackend's AioContext until it signals completion via rwco.ret. */
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    AioContext *aio_context;
    QEMUIOVector qiov;
    struct iovec iov;
    Coroutine *co;
    BlkRwCo rwco;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    co = qemu_coroutine_create(co_entry);
    qemu_coroutine_enter(co, &rwco);

    aio_context = blk_get_aio_context(blk);
    while (rwco.ret == NOT_DONE) {
        aio_poll(aio_context, true);
    }

    return rwco.ret;
}

static int blk_rw(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                  int nb_sectors, CoroutineEntry co_entry,
                  BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return blk_prw(blk, sector_num << BDRV_SECTOR_BITS, buf,
                   nb_sectors << BDRV_SECTOR_BITS, co_entry, flags);
}

int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    return blk_rw(blk, sector_num, buf, nb_sectors, blk_read_entry, 0);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    BlockDriverState *bs = blk_bs(blk);
    bool enabled;
    int ret;

    ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = blk_read(blk, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    return blk_rw(blk, sector_num, (uint8_t *) buf, nb_sectors,
                  blk_write_entry, 0);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    return blk_rw(blk, sector_num, NULL, nb_sectors, blk_write_entry,
                  flags | BDRV_REQ_ZERO_WRITE);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
    QEMUBH *bh;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size = sizeof(BlkAioEmAIOCB),
};

static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->bh) {
        assert(acb->has_returned);
        qemu_bh_delete(acb->bh);
    }
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    blk_aio_complete(opaque);
}

/* Start @co_entry in a coroutine.  If it completed without yielding, defer
 * the completion callback to a bottom half so the callback never runs before
 * the BlockAIOCB has been returned to the caller. */
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                QEMUIOVector *qiov, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->bh = NULL;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry);
    qemu_coroutine_enter(co, acb);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        acb->bh = aio_bh_new(blk_get_aio_context(blk), blk_aio_complete_bh, acb);
        qemu_bh_schedule(acb->bh);
    }

    return &acb->common;
}

static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(!rwco->qiov || rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
    }

    return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS,
                        nb_sectors << BDRV_SECTOR_BITS, NULL,
                        blk_aio_write_entry, flags | BDRV_REQ_ZERO_WRITE,
                        cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}
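
/*
 * Illustrative sketch (not part of the original file): synchronous byte-level
 * I/O through the helpers above.  The buffer size and offsets are arbitrary.
 *
 *     uint8_t buf[512];
 *     int ret;
 *
 *     ret = blk_pread(blk, 0, buf, sizeof(buf));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ret = blk_pwrite(blk, 4096, buf, sizeof(buf));
 *
 * Both helpers return the number of bytes transferred on success and a
 * negative errno on failure; they block by polling the AioContext until the
 * internal coroutine completes (see blk_prw()).
 */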

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
    }

    assert(nb_sectors << BDRV_SECTOR_BITS == iov->size);
    return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS, iov->size, iov,
                        blk_aio_read_entry, 0, cb, opaque);
}

BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
    }

    assert(nb_sectors << BDRV_SECTOR_BITS == iov->size);
    return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS, iov->size, iov,
                        blk_aio_write_entry, 0, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
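
/*
 * Illustrative sketch (not part of the original file): an asynchronous read
 * using the AIO entry points above.  The completion callback, buffer and
 * opaque pointer are hypothetical.
 *
 *     static void my_read_complete(void *opaque, int ret)
 *     {
 *         // ret is 0 on success, a negative errno on failure
 *     }
 *
 *     QEMUIOVector qiov;
 *     struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     blk_aio_preadv(blk, 0, &qiov, 0, my_read_complete, NULL);
 *
 * The request runs in a coroutine; even if it completes without yielding, the
 * callback is deferred to a bottom half (see blk_aio_prwv()).
 */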

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_flush(blk_bs(blk), cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk_bs(blk), sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk_bs(blk), reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_ioctl(blk_bs(blk), req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk_bs(blk), sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

int blk_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_flush(blk_bs(blk));
}

void blk_drain(BlockBackend *blk)
{
    if (blk_bs(blk)) {
        bdrv_drain(blk_bs(blk));
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/*
 * Report an I/O error to the monitor and, for the "stop" policy, request a
 * VM stop.  @error is a positive errno value.
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}
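
/*
 * Illustrative sketch (not part of the original file): how a device model's
 * request completion path could use the two helpers above.  The names req and
 * my_complete_request() are hypothetical; ret is the negative errno from the
 * failed request.
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *
 *     blk_error_action(blk, action, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_REPORT) {
 *         my_complete_request(req, -ret);
 *     }
 *
 * With BLOCK_ERROR_ACTION_STOP the request is typically kept around and
 * retried after the VM is resumed.
 */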

int blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return 0;
    }

    return bdrv_is_sg(bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }
}

int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bs->bl.max_transfer_length;
    } else {
        return 0;
    }
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_aio_context(bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_set_aio_context(bs, new_context);
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}
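
/*
 * Illustrative sketch (not part of the original file): registering a notifier
 * that runs whenever the root BlockDriverState is detached from this
 * BlockBackend.  The callback name is hypothetical.
 *
 *     static void my_root_removed(Notifier *notifier, void *data)
 *     {
 *         BlockBackend *blk = data;
 *         // react to blk losing its medium
 *     }
 *
 *     static Notifier my_notifier = { .notify = my_root_removed };
 *
 *     blk_add_remove_bs_notifier(blk, &my_notifier);
 *
 * notifier_list_notify() in blk_remove_bs() passes the BlockBackend itself as
 * the notifier data, which is why data can be cast back to it here.
 */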

void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return blk_co_pwritev(blk, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk_bs(blk), sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk_bs(blk), offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk_bs(blk), sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;

    if (blk->root_state.throttle_group) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    if (blk->root->bs->throttle_state) {
        const char *name = throttle_group_get_name(blk->root->bs);
        blk->root_state.throttle_group = g_strdup(name);
        blk->root_state.throttle_state = throttle_group_incref(name);
    } else {
        blk->root_state.throttle_group = NULL;
        blk->root_state.throttle_state = NULL;
    }
}

/*
 * Applies the information in the root state to the given BlockDriverState.
 * This does not include the flags which have to be specified for bdrv_open();
 * use blk_get_open_flags_from_root_state() for that.
 */
void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
{
    bs->detect_zeroes = blk->root_state.detect_zeroes;
    if (blk->root_state.throttle_group) {
        bdrv_io_limits_enable(bs, blk->root_state.throttle_group);
    }
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}

int blk_flush_all(void)
{
    BlockBackend *blk = NULL;
    int result = 0;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);
        int ret;

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk)) {
            ret = blk_flush(blk);
            if (ret < 0 && !result) {
                result = ret;
            }
        }
        aio_context_release(aio_context);
    }

    return result;
}