/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait ] = "InitWait",
		[ XenbusStateInitialised ] = "Initialised",
		[ XenbusStateConnected ] = "Connected",
		[ XenbusStateClosing ] = "Closing",
		[ XenbusStateClosed ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  On success,
 * the given @path will be saved as @watch->node, and remains the caller's to
 * free.  On error, @watch->node will be NULL, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 *
 * Returns: 0 on success or -errno on error.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
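
/*
 * Illustrative sketch (not part of the original file): a typical frontend
 * uses xenbus_watch_path()/xenbus_watch_pathfmt() to track the peer's state
 * node.  The callback name "otherend_changed" and the helper below are
 * hypothetical.
 *
 *	static void otherend_changed(struct xenbus_watch *watch,
 *				     const char *path, const char *token)
 *	{
 *		// React to the backend moving through the XenbusState* values.
 *	}
 *
 *	static int watch_backend(struct xenbus_device *dev,
 *				 struct xenbus_watch *watch)
 *	{
 *		// Watches <otherend>/state; on failure the device is switched
 *		// to XenbusStateClosing by the xenbus helpers themselves.
 *		return xenbus_watch_pathfmt(dev, watch, otherend_changed,
 *					    "%s/state", dev->otherend);
 *	}
 */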

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt, using the given
 * xenbus_watch structure for storage, and the given @callback function as
 * the callback.  On success, the watched path will be saved as @watch->node,
 * and becomes the caller's to kfree().  On error, @watch->node will be NULL,
 * so the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 *
 * Returns: 0 on success or -errno on error.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/*
	 * We check whether the state is currently set to the given value,
	 * and if not, then the state is set.  We don't want to
	 * unconditionally write the given state, because we don't want to
	 * fire watches unnecessarily.  Furthermore, if the node has gone,
	 * we don't write to it, as the device will be tearing down, and we
	 * don't want to resurrect that directory.
	 *
	 * Note that, because of this cached value of our state, this
	 * function will not take a caller's Xenstore transaction, because
	 * dev->state would not get reset if the transaction was aborted.
	 */
	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state - save the new state of a driver
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new
 * state.  Return 0.  Callers should check for errors themselves via the
 * xenbus_dev_error() interface.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (!path_buffer ||
	    xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer))
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error - place an error message into the store
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal - put an error message into the store and then shutdown
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring - grant access to a ring of pages
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	int i, j;

	for (i = 0; i < nr_pages; i++) {
		err = gnttab_grant_foreign_access(dev->otherend_id,
						  virt_to_gfn(vaddr), 0);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "granting access to ring page");
			goto fail;
		}
		grefs[i] = err;

		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;

fail:
	for (j = 0; j < i; j++)
		gnttab_end_foreign_access_ref(grefs[j], 0);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
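
/*
 * Illustrative sketch (not part of the original file): a frontend typically
 * allocates its shared ring, grants it with xenbus_grant_ring(), and then
 * advertises the grant reference in XenStore.  The node name "ring-ref" and
 * the single-page ring are assumptions for the example.
 *
 *	void *ring = (void *)get_zeroed_page(GFP_KERNEL);
 *	grant_ref_t gref;
 *	int err;
 *
 *	err = xenbus_grant_ring(dev, ring, 1, &gref);
 *	if (err)
 *		goto fail;	// error already reported via xenbus_dev_fatal()
 *
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%u", gref);
 */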

/**
 * xenbus_alloc_evtchn - allocate an event channel
 * @dev: xenbus device
 * @port: event channel port to be filled in
 *
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
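
/*
 * Illustrative sketch (not part of the original file): after allocating the
 * unbound channel, a driver usually binds a handler to it and publishes the
 * port next to its ring reference.  The handler name "example_interrupt",
 * the "event-channel" node name and the "info" pointer are assumptions for
 * the example.
 *
 *	int port, irq, err;
 *
 *	err = xenbus_alloc_evtchn(dev, &port);
 *	if (err)
 *		goto fail;
 *
 *	irq = bind_evtchn_to_irqhandler(port, example_interrupt, 0,
 *					"example-frontend", info);
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "event-channel",
 *			    "%d", port);
 */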

/**
 * xenbus_free_evtchn - free an event channel
 * @dev: xenbus device
 * @port: event channel port
 *
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

/**
 * xenbus_map_ring_valloc - allocate & map pages of VA space
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and GNTST_*
 * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
 * error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
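
/*
 * Illustrative sketch (not part of the original file): a backend reads the
 * grant reference published by its frontend and maps it.  The node name
 * "ring-ref" and the single-grant ring are assumptions for the example.
 *
 *	grant_ref_t ring_ref;
 *	void *ring;
 *	int err;
 *
 *	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
 *	if (err != 1)
 *		goto fail;
 *
 *	err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &ring);
 *	if (err)
 *		goto fail;
 *	// ... use the ring; later: xenbus_unmap_ring_vfree(dev, ring);
 */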

/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64.  The caller is responsible for preparing the
 * right array to feed into this function. */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     phys_addr_t *addrs,
			     unsigned int flags,
			     bool *leaked)
{
	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i, j;
	int err = GNTST_okay;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		memset(&map[i], 0, sizeof(map[i]));
		gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
				  dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (map[i].status != GNTST_okay) {
			err = map[i].status;
			xenbus_dev_fatal(dev, map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = map[i].handle;
	}

	return GNTST_okay;

 fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			memset(&unmap[j], 0, sizeof(unmap[j]));
			gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
		BUG();

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return err;
}

struct map_ring_valloc_hvm
{
	unsigned int idx;

	/* Why do we need two arrays? See the comment on __xenbus_map_ring. */
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc_hvm *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      grant_ref_t *gnt_ref,
				      unsigned int nr_grefs,
				      void **vaddr)
{
	struct xenbus_map_node *node;
	int err;
	void *addr;
	bool leaked = false;
	struct map_ring_valloc_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     &info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info.phys_addrs, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
 out_free_ballooned_pages:
	if (!leaked)
		free_xenballooned_pages(nr_pages, node->hvm.pages);
 out_err:
	kfree(node);
	return err;
}

/**
 * xenbus_map_ring - map pages of memory into this domain
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @handles: pointer to grant handle to be filled
 * @vaddrs: addresses to be mapped to
 * @leaked: set if cleanup of a failed map itself failed
 *
 * Map pages of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the pages to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM / -EINVAL on error.  If an error is returned, the device will
 * switch to XenbusStateClosing and the first error message will be saved in
 * XenStore.  Furthermore, if the mapping fails, the caller should check
 * @leaked: if it is set, do not attempt to free the mapped area, as it is
 * better to leak the memory than to crash later in the kernel.
 */
int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
		    unsigned int nr_grefs, grant_handle_t *handles,
		    unsigned long *vaddrs, bool *leaked)
{
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	int i;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = (unsigned long)vaddrs[i];

	return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
				 phys_addrs, GNTMAP_host_map, leaked);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

/**
 * xenbus_unmap_ring_vfree - unmap and free the VA space of a mapped ring
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

#ifdef CONFIG_XEN_PV
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     grant_ref_t *gnt_refs,
				     unsigned int nr_grefs,
				     void **vaddr)
{
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *ptes[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	int err = GNTST_okay;
	int i;
	bool leaked;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;

	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				phys_addrs,
				GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	kfree(node);
	return err;
}

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};
#endif

struct unmap_ring_vfree_hvm
{
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_vfree_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_vfree_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		free_xenballooned_pages(nr_pages, node->hvm.pages);
	} else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}

/**
 * xenbus_unmap_ring - unmap memory imported from another domain
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t *handles, unsigned int nr_handles,
		      unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

/**
 * xenbus_read_driver_state - read the state of a driver
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}