1
2
3
4
5
6
7
8
9
10#include "qemu/osdep.h"
11#include <glusterfs/api/glfs.h>
12#include "block/block_int.h"
13#include "qapi/error.h"
14#include "qemu/uri.h"
15
/*
 * Per-request state shared between the coroutine issuing a gluster
 * request and the libgfapi completion callback.
 */
typedef struct GlusterAIOCB {
    int64_t size;            /* expected transfer length in bytes (0 for fsync/discard) */
    int ret;                 /* 0 on success, negative errno on failure */
    QEMUBH *bh;              /* bottom half used to re-enter the coroutine */
    Coroutine *coroutine;    /* coroutine waiting for completion */
    AioContext *aio_context; /* context the BH is scheduled in */
} GlusterAIOCB;
23
/* Per-BlockDriverState state: one gluster connection plus one open fd. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;  /* libgfapi connection handle */
    struct glfs_fd *fd; /* open handle for the image file */
} BDRVGlusterState;
28
/*
 * Parsed form of a gluster URI
 * (gluster[+transport]://[server[:port]]/volname/image[?socket=...]).
 * All strings are heap-allocated; freed by qemu_gluster_gconf_free().
 */
typedef struct GlusterConf {
    char *server;    /* host name, or socket path for the unix transport */
    int port;        /* 0 when the URI carries no port */
    char *volname;   /* gluster volume name */
    char *image;     /* path of the image inside the volume */
    char *transport; /* "tcp", "unix" or "rdma" */
} GlusterConf;
36
37static void qemu_gluster_gconf_free(GlusterConf *gconf)
38{
39 if (gconf) {
40 g_free(gconf->server);
41 g_free(gconf->volname);
42 g_free(gconf->image);
43 g_free(gconf->transport);
44 g_free(gconf);
45 }
46}
47
48static int parse_volume_options(GlusterConf *gconf, char *path)
49{
50 char *p, *q;
51
52 if (!path) {
53 return -EINVAL;
54 }
55
56
57 p = q = path + strspn(path, "/");
58 p += strcspn(p, "/");
59 if (*p == '\0') {
60 return -EINVAL;
61 }
62 gconf->volname = g_strndup(q, p - q);
63
64
65 p += strspn(p, "/");
66 if (*p == '\0') {
67 return -EINVAL;
68 }
69 gconf->image = g_strdup(p);
70 return 0;
71}
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
/*
 * Parse a gluster URI of the form
 *   gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 * into @gconf.  Returns 0 on success, -EINVAL for any malformed URI.
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport: a missing scheme or plain "gluster" defaults to tcp */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    qp = query_params_parse(uri->query);
    /* exactly one "?socket=..." for unix, no query params otherwise */
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* unix transport: host/port make no sense; "server" is the socket */
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        /* uri->port is 0 when absent -- presumably libgfapi then picks
         * its default port; verify against glfs_set_volfile_server docs */
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
170
171static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
172 Error **errp)
173{
174 struct glfs *glfs = NULL;
175 int ret;
176 int old_errno;
177
178 ret = qemu_gluster_parseuri(gconf, filename);
179 if (ret < 0) {
180 error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
181 "volname/image[?socket=...]");
182 errno = -ret;
183 goto out;
184 }
185
186 glfs = glfs_new(gconf->volname);
187 if (!glfs) {
188 goto out;
189 }
190
191 ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
192 gconf->port);
193 if (ret < 0) {
194 goto out;
195 }
196
197
198
199
200
201 ret = glfs_set_logging(glfs, "-", 4);
202 if (ret < 0) {
203 goto out;
204 }
205
206 ret = glfs_init(glfs);
207 if (ret) {
208 error_setg_errno(errp, errno,
209 "Gluster connection failed for server=%s port=%d "
210 "volume=%s image=%s transport=%s", gconf->server,
211 gconf->port, gconf->volname, gconf->image,
212 gconf->transport);
213
214
215 if (errno == 0)
216 errno = EINVAL;
217
218 goto out;
219 }
220 return glfs;
221
222out:
223 if (glfs) {
224 old_errno = errno;
225 glfs_fini(glfs);
226 errno = old_errno;
227 }
228 return NULL;
229}
230
/*
 * BH callback: runs in the BDS AioContext once a gluster request has
 * completed, and re-enters the coroutine parked in qemu_gluster_co_rw()
 * (or one of the other async submitters).
 */
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}
239
240
241
242
/*
 * libgfapi completion callback.  NOTE(review): presumably invoked from a
 * gluster-internal thread rather than the QEMU AioContext -- hence the
 * bottom half to hop back before touching coroutine state; confirm
 * against the libgfapi async API documentation.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        /* full transfer, or a zero-length op such as fsync/discard */
        acb->ret = 0;
    } else if (ret < 0) {
        acb->ret = -errno; /* assumes gluster left errno set here -- verify */
    } else {
        acb->ret = -EIO; /* short transfer */
    }

    acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}
258
259
/* Runtime options accepted by qemu_gluster_open(). */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { }
    },
};
272
273static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
274{
275 assert(open_flags != NULL);
276
277 *open_flags |= O_BINARY;
278
279 if (bdrv_flags & BDRV_O_RDWR) {
280 *open_flags |= O_RDWR;
281 } else {
282 *open_flags |= O_RDONLY;
283 }
284
285 if ((bdrv_flags & BDRV_O_NOCACHE)) {
286 *open_flags |= O_DIRECT;
287 }
288}
289
/*
 * Open a gluster image: establish the glfs connection, optionally tune
 * the write-behind xlator, then open the image fd.
 * Returns 0 on success or a negative errno; on failure any partially
 * created connection state is torn down before returning.
 */
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    GlusterConf *gconf = g_new0(GlusterConf, 1);
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        ret = -errno;
        goto out;
    }

#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
    /*
     * Keep failed syncs in the write-behind cache so a failed fsync can
     * be retried.  NOTE(review): this pairs with the drastic error
     * handling in qemu_gluster_co_flush_to_disk(); confirm the option's
     * semantics against the GlusterFS write-behind documentation.
     */
    ret = glfs_set_xlator_option(s->glfs, "*-write-behind",
                                 "resync-failed-syncs-after-fsync",
                                 "on");
    if (ret < 0) {
        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
        ret = -errno;
        goto out;
    }
#endif

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    /* error path: undo whatever was opened before the failure */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
355
/* Staging area for a reopen: the new connection before it is committed. */
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;  /* replacement connection */
    struct glfs_fd *fd; /* replacement fd opened with the new flags */
} BDRVGlusterReopenState;
360
361
/*
 * Prepare a reopen with new flags: build a second, independent gluster
 * connection and fd in state->opaque, leaving the current one untouched.
 *
 * NOTE(review): on failure nothing is torn down here -- presumably the
 * block layer then calls qemu_gluster_reopen_abort(), which frees the
 * staged state; confirm against the reopen state machine in block.c.
 */
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterReopenState *reop_s;
    GlusterConf *gconf = NULL;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    state->opaque = g_new0(BDRVGlusterReopenState, 1);
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_new0(GlusterConf, 1);

    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
    /* mirror the write-behind tuning done in qemu_gluster_open() */
    ret = glfs_set_xlator_option(reop_s->glfs, "*-write-behind",
                                 "resync-failed-syncs-after-fsync", "on");
    if (ret < 0) {
        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
        ret = -errno;
        goto exit;
    }
#endif

    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
    if (reop_s->fd == NULL) {
        /* the staged glfs connection is cleaned up in reopen_abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* gconf is only needed to open; the staged state owns glfs/fd now */
    qemu_gluster_gconf_free(gconf);
    return ret;
}
408
409static void qemu_gluster_reopen_commit(BDRVReopenState *state)
410{
411 BDRVGlusterReopenState *reop_s = state->opaque;
412 BDRVGlusterState *s = state->bs->opaque;
413
414
415
416 if (s->fd) {
417 glfs_close(s->fd);
418 }
419 if (s->glfs) {
420 glfs_fini(s->glfs);
421 }
422
423
424 s->fd = reop_s->fd;
425 s->glfs = reop_s->glfs;
426
427 g_free(state->opaque);
428 state->opaque = NULL;
429
430 return;
431}
432
433
434static void qemu_gluster_reopen_abort(BDRVReopenState *state)
435{
436 BDRVGlusterReopenState *reop_s = state->opaque;
437
438 if (reop_s == NULL) {
439 return;
440 }
441
442 if (reop_s->fd) {
443 glfs_close(reop_s->fd);
444 }
445
446 if (reop_s->glfs) {
447 glfs_fini(reop_s->glfs);
448 }
449
450 g_free(state->opaque);
451 state->opaque = NULL;
452
453 return;
454}
455
456#ifdef CONFIG_GLUSTERFS_ZEROFILL
457static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
458 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
459{
460 int ret;
461 GlusterAIOCB acb;
462 BDRVGlusterState *s = bs->opaque;
463 off_t size = nb_sectors * BDRV_SECTOR_SIZE;
464 off_t offset = sector_num * BDRV_SECTOR_SIZE;
465
466 acb.size = size;
467 acb.ret = 0;
468 acb.coroutine = qemu_coroutine_self();
469 acb.aio_context = bdrv_get_aio_context(bs);
470
471 ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
472 if (ret < 0) {
473 return -errno;
474 }
475
476 qemu_coroutine_yield();
477 return acb.ret;
478}
479
480static inline bool gluster_supports_zerofill(void)
481{
482 return 1;
483}
484
485static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
486 int64_t size)
487{
488 return glfs_zerofill(fd, offset, size);
489}
490
491#else
492static inline bool gluster_supports_zerofill(void)
493{
494 return 0;
495}
496
497static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
498 int64_t size)
499{
500 return 0;
501}
502#endif
503
504static int qemu_gluster_create(const char *filename,
505 QemuOpts *opts, Error **errp)
506{
507 struct glfs *glfs;
508 struct glfs_fd *fd;
509 int ret = 0;
510 int prealloc = 0;
511 int64_t total_size = 0;
512 char *tmp = NULL;
513 GlusterConf *gconf = g_new0(GlusterConf, 1);
514
515 glfs = qemu_gluster_init(gconf, filename, errp);
516 if (!glfs) {
517 ret = -errno;
518 goto out;
519 }
520
521 total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
522 BDRV_SECTOR_SIZE);
523
524 tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
525 if (!tmp || !strcmp(tmp, "off")) {
526 prealloc = 0;
527 } else if (!strcmp(tmp, "full") &&
528 gluster_supports_zerofill()) {
529 prealloc = 1;
530 } else {
531 error_setg(errp, "Invalid preallocation mode: '%s'"
532 " or GlusterFS doesn't support zerofill API",
533 tmp);
534 ret = -EINVAL;
535 goto out;
536 }
537
538 fd = glfs_creat(glfs, gconf->image,
539 O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
540 if (!fd) {
541 ret = -errno;
542 } else {
543 if (!glfs_ftruncate(fd, total_size)) {
544 if (prealloc && qemu_gluster_zerofill(fd, 0, total_size)) {
545 ret = -errno;
546 }
547 } else {
548 ret = -errno;
549 }
550
551 if (glfs_close(fd) != 0) {
552 ret = -errno;
553 }
554 }
555out:
556 g_free(tmp);
557 qemu_gluster_gconf_free(gconf);
558 if (glfs) {
559 glfs_fini(glfs);
560 }
561 return ret;
562}
563
564static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
565 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
566{
567 int ret;
568 GlusterAIOCB acb;
569 BDRVGlusterState *s = bs->opaque;
570 size_t size = nb_sectors * BDRV_SECTOR_SIZE;
571 off_t offset = sector_num * BDRV_SECTOR_SIZE;
572
573 acb.size = size;
574 acb.ret = 0;
575 acb.coroutine = qemu_coroutine_self();
576 acb.aio_context = bdrv_get_aio_context(bs);
577
578 if (write) {
579 ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
580 gluster_finish_aiocb, &acb);
581 } else {
582 ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
583 gluster_finish_aiocb, &acb);
584 }
585
586 if (ret < 0) {
587 return -errno;
588 }
589
590 qemu_coroutine_yield();
591 return acb.ret;
592}
593
594static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
595{
596 int ret;
597 BDRVGlusterState *s = bs->opaque;
598
599 ret = glfs_ftruncate(s->fd, offset);
600 if (ret < 0) {
601 return -errno;
602 }
603
604 return 0;
605}
606
/* Coroutine read: delegate to the shared r/w path with write = 0. */
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}
612
/* Coroutine write: delegate to the shared r/w path with write = 1. */
static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
618
619static void qemu_gluster_close(BlockDriverState *bs)
620{
621 BDRVGlusterState *s = bs->opaque;
622
623 if (s->fd) {
624 glfs_close(s->fd);
625 s->fd = NULL;
626 }
627 glfs_fini(s->glfs);
628}
629
/* Flush cached data to disk via an async fsync; on failure the whole
 * BDS is shut down (see the comment at the error label). */
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = 0;  /* fsync transfers no data; callback treats 0 as success */
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        ret = -errno;
        goto error;
    }

    qemu_coroutine_yield();
    if (acb.ret < 0) {
        ret = acb.ret;
        goto error;
    }

    return acb.ret;

error:
    /*
     * A failed fsync is treated as fatal: close the connection and
     * detach the driver so no further I/O is attempted on this BDS.
     * NOTE(review): presumably this is because gluster's write-behind
     * cache may drop unsynced writes after a failed fsync, leaving
     * subsequent reads inconsistent with what is on disk (cf. the
     * "resync-failed-syncs-after-fsync" xlator option set at open time
     * under CONFIG_GLUSTERFS_XLATOR_OPT) -- confirm against the
     * GlusterFS write-behind documentation before relaxing this.
     */
    qemu_gluster_close(bs);
    bs->drv = NULL;
    return ret;
}
673
674#ifdef CONFIG_GLUSTERFS_DISCARD
675static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
676 int64_t sector_num, int nb_sectors)
677{
678 int ret;
679 GlusterAIOCB acb;
680 BDRVGlusterState *s = bs->opaque;
681 size_t size = nb_sectors * BDRV_SECTOR_SIZE;
682 off_t offset = sector_num * BDRV_SECTOR_SIZE;
683
684 acb.size = 0;
685 acb.ret = 0;
686 acb.coroutine = qemu_coroutine_self();
687 acb.aio_context = bdrv_get_aio_context(bs);
688
689 ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
690 if (ret < 0) {
691 return -errno;
692 }
693
694 qemu_coroutine_yield();
695 return acb.ret;
696}
697#endif
698
699static int64_t qemu_gluster_getlength(BlockDriverState *bs)
700{
701 BDRVGlusterState *s = bs->opaque;
702 int64_t ret;
703
704 ret = glfs_lseek(s->fd, 0, SEEK_END);
705 if (ret < 0) {
706 return -errno;
707 } else {
708 return ret;
709 }
710}
711
712static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
713{
714 BDRVGlusterState *s = bs->opaque;
715 struct stat st;
716 int ret;
717
718 ret = glfs_fstat(s->fd, &st);
719 if (ret < 0) {
720 return -errno;
721 } else {
722 return st.st_blocks * 512;
723 }
724}
725
/*
 * Report that newly created images cannot be assumed to read as zeroes.
 * NOTE(review): conservative choice -- whether a gluster backing store
 * zero-fills depends on the volume configuration; confirm before
 * changing this.
 */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    return 0;
}
731
/* Creation options accepted by qemu_gluster_create(). */
static QemuOptsList qemu_gluster_create_opts = {
    .name = "qemu-gluster-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, full)"
        },
        { }
    }
};
749
/* Driver instance for plain "gluster://" URIs (defaults to tcp). */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
776
/* Driver instance for explicit "gluster+tcp://" URIs. */
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
803
/* Driver instance for "gluster+unix://" URIs (unix-domain socket). */
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
830
/* Driver instance for "gluster+rdma://" URIs (RDMA transport). */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
857
/* Register one driver per supported transport scheme. */
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);
867