/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

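/*
 * Bookkeeping for one mapped ring: the grant handles in use plus either
 * the PV vm_struct or the HVM ballooned pages backing the mapping.
 * Nodes are kept on xenbus_valloc_pages so the unmap path can find a
 * mapping again from its virtual address.
 */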
struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

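/*
 * Ring mapping needs different mechanics for PV and HVM guests, so it
 * is dispatched through this small vtable; the implementation is
 * selected once at boot in xenbus_ring_ops_init().
 */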
struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed	  ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the given @pathfmt, using the given xenbus_watch
 * structure for storage, and the given @callback function as the
 * callback.  Return 0 on success, or -errno on error.  On success, the
 * watched path will be saved as @watch->node and becomes the caller's
 * to kfree().  On error, watch->node will be NULL, so the caller has
 * nothing to free, the device will switch to %XenbusStateClosing, and
 * the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/*
	 * We check whether the state is currently set to the given value,
	 * and if not, then the state is set.  We don't want to
	 * unconditionally write the given state, because we don't want to
	 * fire watches unnecessarily.  Furthermore, if the node has gone,
	 * we don't write to it, as the device will be tearing down, and we
	 * don't want to resurrect that directory.
	 *
	 * Note that, because of this cached value of our state, this
	 * function will not take a caller's Xenstore transaction
	 * (something it was trying to in the past) because dev->state
	 * would not get reset if the transaction was aborted.
	 */
	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given
 * new state.  Return 0 on success, or -errno on error.  On error, the
 * device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);

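/*
 * Advertise Closed in the store and complete dev->down so anyone
 * waiting for the frontend's teardown can make progress.
 */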
int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

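/*
 * Common helper for xenbus_dev_error() and xenbus_dev_fatal(): format
 * the message, log it via dev_err() and mirror it into the store under
 * error/<nodename>/error.
 */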
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (path_buffer)
		xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	int i, j;

	for (i = 0; i < nr_pages; i++) {
		err = gnttab_grant_foreign_access(dev->otherend_id,
						  virt_to_gfn(vaddr), 0);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "granting access to ring page");
			goto fail;
		}
		grefs[i] = err;

		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;

fail:
	for (j = 0; j < i; j++)
		gnttab_end_foreign_access_ref(grefs[j], 0);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);

/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);

/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_refs: grant references
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and GNTST_*
 * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
 * error. If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64.  The caller is responsible for preparing the
 * right array to feed into this function. */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     phys_addr_t *addrs,
			     unsigned int flags,
			     bool *leaked)
{
	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i, j;
	int err = GNTST_okay;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		memset(&map[i], 0, sizeof(map[i]));
		gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
				  dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (map[i].status != GNTST_okay) {
			err = map[i].status;
			xenbus_dev_fatal(dev, map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = map[i].handle;
	}

	return GNTST_okay;

 fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			memset(&unmap[j], 0, sizeof(unmap[j]));
			gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
		BUG();

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return err;
}

struct map_ring_valloc_hvm
{
	unsigned int idx;

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

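/*
 * gnttab_foreach_grant() callback: record the guest virtual address of
 * each ballooned page, both as the host_addr handed to the map op and
 * for the later unmap.
 */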
static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc_hvm *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      grant_ref_t *gnt_ref,
				      unsigned int nr_grefs,
				      void **vaddr)
{
	struct xenbus_map_node *node;
	int err;
	void *addr;
	bool leaked = false;
	struct map_ring_valloc_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     &info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info.phys_addrs, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
 out_free_ballooned_pages:
	if (!leaked)
		free_xenballooned_pages(nr_pages, node->hvm.pages);
 out_err:
	kfree(node);
	return err;
}

/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @handles: pointer to grant handles to be filled
 * @vaddrs: addresses to be mapped to
 * @leaked: failed to clean up a failed map, caller should not free vaddr
 *
 * Map pages of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the pages to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM / -EINVAL on error. If an error is returned, the device will
 * switch to XenbusStateClosing and the first error message will be saved in
 * XenStore.  Furthermore, if the mapping fails, the caller should check
 * @leaked: if @leaked is not zero it means xenbus_map_ring failed to clean
 * up, and the caller should not free the address space of @vaddr.
 */
int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
		    unsigned int nr_grefs, grant_handle_t *handles,
		    unsigned long *vaddrs, bool *leaked)
{
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	int i;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = (unsigned long)vaddrs[i];

	return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
				 phys_addrs, GNTMAP_host_map, leaked);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another
 * domain. Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

#ifdef CONFIG_XEN_PV
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     grant_ref_t *gnt_refs,
				     unsigned int nr_grefs,
				     void **vaddr)
{
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *ptes[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	int err = GNTST_okay;
	int i;
	bool leaked;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;

	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				phys_addrs,
				GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	kfree(node);
	return err;
}

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};
#endif

struct unmap_ring_vfree_hvm
{
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

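/*
 * gnttab_foreach_grant() callback: collect the guest virtual address of
 * each mapped page so xenbus_unmap_ring() can undo the grant mappings.
 */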
static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_vfree_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_vfree_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		free_xenballooned_pages(nr_pages, node->hvm.pages);
	} else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t *handles, unsigned int nr_handles,
		      unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);

	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

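/*
 * Pick the ring mapping implementation once at boot: PV guests remap
 * grants through PTEs in an allocated VM area, while auto-translated
 * (HVM) guests map them into ballooned pages.
 */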
void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}