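/*
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 */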
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>

#include "xenbus_probe.h"

struct xenbus_map_node {
	struct list_head next;
	union {
		struct vm_struct *area;
		struct page *page;
	};
	grant_handle_t handle;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;
const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait ] = "InitWait",
		[ XenbusStateInitialised ] = "Initialised",
		[ XenbusStateConnected ] = "Connected",
		[ XenbusStateClosing ] = "Closing",
		[ XenbusStateClosed ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
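
/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given @path, using the given xenbus_watch
 * structure for storage, and the given @callback function as the callback.
 * Returns 0 on success, or -errno on error.  On success, @path is saved as
 * @watch->node; on error, @watch->node is cleared, the device switches to
 * XenbusStateClosing, and the error is saved in the store.
 */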
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
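
/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt and its arguments, using
 * the given xenbus_watch structure for storage, and the given @callback
 * function as the callback.  Returns 0 on success, or -errno on error.  On
 * success, the allocated path is saved as @watch->node; on error, it is
 * freed, the device switches to XenbusStateClosing, and the error is saved
 * in the store.
 */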
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
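	/*
	 * Only write the new state if it differs from what is already in
	 * the store, so that watches are not fired unnecessarily.  The
	 * read and write happen in one transaction; if ending the
	 * transaction fails with -EAGAIN, the whole sequence is retried.
	 */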
	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}
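
/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Returns 0.  On error, the device switches to XenbusStateClosing and the
 * error is saved in the store.
 */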
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
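
/*
 * Return the path to the error node for the given device, or NULL on
 * failure.  The value returned must be freed by the caller.
 */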
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}
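
/*
 * Report a (negative) errno plus a formatted message via dev_err() and, if
 * possible, record it in the device's error node in the store.
 */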
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}
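
/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */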
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
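
/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */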
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
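
/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but only switches
 * the state at depth 0, which avoids recursing when the state switch
 * itself is what failed.
 */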
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}
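
/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Returns a grant reference on success, or -errno on error.  On error, the
 * device switches to XenbusStateClosing and the error is saved in the store.
 */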
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
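
/*
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Returns 0 on success, or -errno on error.
 * On error, the device switches to XenbusStateClosing and the error is saved
 * in the store.
 */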
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
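
/*
 * Bind to an existing interdomain event channel in another domain.  Returns
 * 0 on success and stores the local port in *port.  On error, returns
 * -errno, switches the device to XenbusStateClosing, and saves the error in
 * the store.
 */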
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
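
/*
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */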
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
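
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.  Returns 0 on
 * success, and GNTST_* (see <xen/interface/grant_table.h>) or -ENOMEM on
 * error.  If an error is returned, the device switches to XenbusStateClosing
 * and the error message is saved in the store.
 */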
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref,
			   void **vaddr)
{
	return ring_ops->map(dev, gnt_ref, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
		.ref = gnt_ref,
		.dom = dev->otherend_id,
	};
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *pte;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(PAGE_SIZE, &pte);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	op.host_addr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_batch_map(&op, 1);

	if (op.status != GNTST_okay) {
		free_vm_area(area);
		kfree(node);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	node->handle = op.handle;
	node->area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      int gnt_ref, void **vaddr)
{
	struct xenbus_map_node *node;
	int err;
	void *addr;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(1, &node->page, false);
	if (err)
		goto out_err;

	addr = pfn_to_kaddr(page_to_pfn(node->page));

	err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
	if (err)
		goto out_err_free_ballooned_pages;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_err_free_ballooned_pages:
	free_xenballooned_pages(1, &node->page);
 out_err:
	kfree(node);
	return err;
}
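
/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see <xen/interface/grant_table.h>) on
 * error.  If an error is returned, the device switches to XenbusStateClosing
 * and the error message is saved in the store.
 */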
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
			  dev->otherend_id);

	gnttab_batch_map(&op, 1);

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
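
/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another
 * domain.  Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and GNTST_* on error
 * (see <xen/interface/grant_table.h>).
 */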
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};
	unsigned int level;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = node->handle;
	op.host_addr = arbitrary_virt_to_machine(
		lookup_address((unsigned long)vaddr, &level)).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		free_vm_area(node->area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 node->handle, op.status);

	kfree(node);
	return op.status;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = pfn_to_kaddr(page_to_pfn(node->page));
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	rv = xenbus_unmap_ring(dev, node->handle, addr);

	if (!rv)
		free_xenballooned_pages(1, &node->page);
	else
		WARN(1, "Leaking %p\n", vaddr);

	kfree(node);
	return rv;
}
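
/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another
 * domain.  Returns 0 on success and GNTST_* on error
 * (see <xen/interface/grant_table.h>).
 */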
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
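
/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */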
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
	if (xen_pv_domain())
		ring_ops = &ring_ops_pv;
	else
		ring_ops = &ring_ops_hvm;
}