/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus_probe.h"

struct xenbus_map_node {
	struct list_head next;
	union {
		struct vm_struct *area; /* PV */
		struct page *page;      /* HVM */
	};
	grant_handle_t handle;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed       ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
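
/*
 * Example (illustrative only, not part of the original flow): drivers
 * typically use xenbus_strstate() when logging state transitions from an
 * ->otherend_changed callback:
 *
 *	dev_dbg(&dev->dev, "%s: backend state %s\n",
 *		dev->nodename, xenbus_strstate(backend_state));
 */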

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node and
 * @watch->callback are reset to NULL, the device switches towards closing via
 * xenbus_dev_fatal(), and the error is saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
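
/*
 * Usage sketch (hypothetical caller, with "info" standing in for some
 * driver-private structure that embeds a struct xenbus_watch, and
 * my_otherend_changed() an assumed callback with the signature taken by
 * @callback above): watch the peer's "state" node.  The path must stay
 * valid for the lifetime of the watch; on failure it remains the caller's
 * to free.
 *
 *	path = kasprintf(GFP_KERNEL, "%s/state", dev->otherend);
 *	if (!path)
 *		return -ENOMEM;
 *	err = xenbus_watch_path(dev, path, &info->watch, my_otherend_changed);
 *	if (err)
 *		kfree(path);
 */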

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the given @pathfmt, using the given xenbus_watch
 * structure for storage, and the given @callback function as the callback.
 * Return 0 on success, or -errno on error.  On success, the generated path
 * will be saved as @watch->node, and becomes the caller's to kfree().
 * On error, watch->node will be NULL, so the caller has nothing to
 * free, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/*
	 * We check whether the state is currently set to the given value, and
	 * if not, then the state is set.  We don't want to unconditionally
	 * write the given state, because we don't want to fire watches
	 * unnecessarily.  Furthermore, if the node has gone, we don't write
	 * to it, as the device will be tearing down, and we don't want to
	 * resurrect that directory.
	 *
	 * Note that, because of this cached value of our state, this
	 * function will not take a caller's Xenstore transaction
	 * (something it was trying to in the past) because dev->state
	 * would not get reset if the transaction was aborted.
	 */
	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new
 * @state.  Return 0 on success, or -errno on error.  On error, the device
 * will switch to XenbusStateClosing, and the error will be saved in the
 * store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
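
/*
 * Usage sketch (hypothetical): a frontend typically advertises each step of
 * the handshake by switching its own state once the corresponding setup is
 * done, e.g.:
 *
 *	xenbus_switch_state(dev, XenbusStateInitialised);
 *
 * Failures are reported by the helper itself (via xenbus_switch_fatal()),
 * so callers normally need no extra error handling here.
 */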

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/*
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}


static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	BUG_ON(len + ret > PRINTF_BUFFER_SIZE - 1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error - place an error message into the store
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal - put an error message in the store, and shut down
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Return a grant reference on success, or -errno on error.  On error, the
 * device will switch to XenbusStateClosing, and the error will be saved in
 * the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
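
/*
 * Usage sketch (hypothetical frontend code): grant the peer access to a
 * freshly allocated shared ring page.  "sring" is an assumed page-sized
 * ring; virt_to_mfn() converts it to the machine frame expected here, and
 * a non-negative return value is the grant reference to publish:
 *
 *	sring = (void *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 *	if (!sring)
 *		return -ENOMEM;
 *	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
 *	if (err < 0) {
 *		free_page((unsigned long)sring);
 *		return err;
 *	}
 *	ring_ref = err;
 */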

/*
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
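
/*
 * Usage sketch (hypothetical, continuing the ring example above): allocate
 * the event channel and publish it together with the grant reference,
 * typically inside a XenStore transaction.  The node names "ring-ref" and
 * "event-channel" follow common frontend convention but are
 * protocol-specific:
 *
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		goto fail;
 *	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ring_ref);
 *	if (err)
 *		goto abort_transaction;
 *	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", evtchn);
 *	if (err)
 *		goto abort_transaction;
 */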

/*
 * Bind to an existing interdomain event channel in another domain.  Returns 0
 * on success and stores the local port in *port.  On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
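
/*
 * Usage sketch (hypothetical backend code): bind to the port the frontend
 * advertised, previously read from XenStore into "remote_port":
 *
 *	err = xenbus_bind_evtchn(dev, remote_port, &local_port);
 *	if (err)
 *		return err;
 *
 * Many backends instead call bind_interdomain_evtchn_to_irqhandler() from
 * <xen/events.h>, which combines this bind with IRQ handler setup.
 */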

/*
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
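
/*
 * Usage sketch (hypothetical setup error path): a channel allocated with
 * xenbus_alloc_evtchn() but not yet bound to an IRQ handler must be closed
 * explicitly.  "my_interrupt" and "info" are assumed driver names:
 *
 *	err = bind_evtchn_to_irqhandler(evtchn, my_interrupt, 0,
 *					"mydevice", info);
 *	if (err < 0) {
 *		xenbus_free_evtchn(dev, evtchn);
 *		return err;
 *	}
 *	info->irq = err;
 *
 * Once bound, unbind_from_irqhandler() closes the channel itself, so this
 * call is typically needed only on such early failures.
 */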

/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref,
			   void **vaddr)
{
	return ring_ops->map(dev, gnt_ref, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
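
/*
 * Usage sketch (hypothetical backend code): map the grant reference the
 * frontend published (previously read into "ring_ref") and initialise the
 * shared ring.  "struct myif_sring" and "ring" are assumed protocol types:
 *
 *	void *addr;
 *	err = xenbus_map_ring_valloc(dev, ring_ref, &addr);
 *	if (err)
 *		return err;
 *	sring = (struct myif_sring *)addr;
 *	BACK_RING_INIT(&ring, sring, PAGE_SIZE);
 */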

static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *pte;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(PAGE_SIZE, &pte);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	op.host_addr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_batch_map(&op, 1);

	if (op.status != GNTST_okay) {
		free_vm_area(area);
		kfree(node);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	node->handle = op.handle;
	node->area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      int gnt_ref, void **vaddr)
{
	struct xenbus_map_node *node;
	int err;
	void *addr;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
	if (err)
		goto out_err;

	addr = pfn_to_kaddr(page_to_pfn(node->page));

	err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
	if (err)
		goto out_err_free_ballooned_pages;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_err_free_ballooned_pages:
	free_xenballooned_pages(1, &node->page);
 out_err:
	kfree(node);
	return err;
}

/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
			  dev->otherend_id);

	gnttab_batch_map(&op, 1);

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
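
/*
 * Usage note (illustrative): unlike xenbus_map_ring_valloc(), the caller
 * supplies the virtual address here (e.g. one backed by a ballooned page,
 * as in xenbus_map_ring_valloc_hvm() above) and must keep the returned
 * handle for the matching xenbus_unmap_ring() call:
 *
 *	grant_handle_t handle;
 *	err = xenbus_map_ring(dev, gnt_ref, &handle, addr);
 */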

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another
 * domain.  Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
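
/*
 * Usage sketch (hypothetical backend teardown): pass back exactly the
 * address that xenbus_map_ring_valloc() returned, here assumed to be held
 * in a driver-private "info->sring" pointer:
 *
 *	if (info->sring) {
 *		xenbus_unmap_ring_vfree(dev, info->sring);
 *		info->sring = NULL;
 *	}
 */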

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};
	unsigned int level;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = node->handle;
	op.host_addr = arbitrary_virt_to_machine(
		lookup_address((unsigned long)vaddr, &level)).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		free_vm_area(node->area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 node->handle, op.status);

	kfree(node);
	return op.status;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = pfn_to_kaddr(page_to_pfn(node->page));
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	rv = xenbus_unmap_ring(dev, node->handle, addr);

	if (!rv)
		free_xenballooned_pages(1, &node->page);
	else
		WARN(1, "Leaking %p\n", vaddr);

	kfree(node);
	return rv;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another
 * domain.  Returns 0 on success and GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
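
/*
 * Usage note (illustrative): this is the low-level counterpart of
 * xenbus_map_ring().  The caller passes the handle obtained at map time and
 * remains responsible for the underlying page, as in
 * xenbus_unmap_ring_vfree_hvm() above:
 *
 *	rv = xenbus_unmap_ring(dev, handle, addr);
 *	if (!rv)
 *		free_xenballooned_pages(1, &page);
 */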

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
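
/*
 * Usage sketch (hypothetical): poll the peer's state while waiting for it
 * to finish closing, as several drivers do on shutdown.  The "state" leaf
 * is appended by xenbus_read_driver_state() itself:
 *
 *	while (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed)
 *		msleep(100);
 */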

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

/* Select the PV or HVM (auto-translated physmap) ring helpers at boot. */
void __init xenbus_ring_ops_init(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
		ring_ops = &ring_ops_hvm;
}