/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/types.h>
#include <linux/vmalloc.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed       ] = "Closed",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
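
/*
 * Example (illustrative): drivers typically use xenbus_strstate() when
 * logging state transitions, e.g. from a state-change handler:
 *
 *	dev_dbg(&dev->dev, "backend state %s\n",
 *		xenbus_strstate(backend_state));
 */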

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given @path, using the given xenbus_watch
 * structure for storage, and the given @callback function as the callback.
 * Returns 0 on success, or -errno on error.  On success, the given @path
 * will be saved as @watch->node, and remains the caller's to free.  On
 * error, the watch is cleared, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
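
/*
 * Example (illustrative sketch; "otherend_changed" and the "info" structure
 * are hypothetical driver-private names): a driver typically watches its
 * peer's directory and reacts to changes in the callback:
 *
 *	static void otherend_changed(struct xenbus_watch *watch,
 *				     const char **vec, unsigned int len)
 *	{
 *		... re-read the watched node and act on the new value ...
 *	}
 *
 *	err = xenbus_watch_path(dev, dev->otherend, &info->otherend_watch,
 *				otherend_changed);
 */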

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the given @pathfmt-formatted path, using the given
 * xenbus_watch structure for storage, and the given @callback function as
 * the callback.  Returns 0 on success, or -errno on error.  On success, the
 * generated path will be saved as @watch->node, and becomes the caller's to
 * kfree().  On error, watch->node will be NULL, so the caller has nothing to
 * free, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
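
/*
 * Example (illustrative; "hotplug_status_changed" is a hypothetical
 * callback): backends often watch a formatted sub-path of their own node:
 *
 *	err = xenbus_watch_pathfmt(dev, &info->hotplug_watch,
 *				   hotplug_status_changed,
 *				   "%s/%s", dev->nodename, "hotplug-status");
 */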

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new
 * @state.  Returns 0 on success, or -errno on error.  On error, the device
 * will switch to XenbusStateClosing, and the error will be saved in the
 * store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	/* We check whether the state is currently set to the given value,
	   and if not, then the state is set.  We don't want to
	   unconditionally write the given state, because we don't want to
	   fire watches unnecessarily.  Furthermore, if the node has gone,
	   we don't write to it, as the device will be tearing down, and we
	   don't want to resurrect that directory. */

	int current_state;
	int err;

	if (state == dev->state)
		return 0;

	err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
			   &current_state);
	if (err != 1)
		return 0;

	err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
	if (err) {
		if (state != XenbusStateClosing) /* Avoid looping */
			xenbus_dev_fatal(dev, err, "writing new state");
		return err;
	}

	dev->state = state;

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
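
/*
 * Example (illustrative): a frontend's connect path usually ends with an
 * explicit switch once its rings and event channel are set up:
 *
 *	err = xenbus_switch_state(dev, XenbusStateConnected);
 *	if (err)
 *		... the error has already been reported via the store ...
 */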

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/**
 * Return the path to the error node for the given device, or NULL on
 * failure.  If the value returned is non-NULL, then it is the caller's to
 * kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
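
/*
 * Example (illustrative; "setup_rings" is a hypothetical helper): a probe
 * routine reporting a fatal setup error with printf-style context.  The
 * device is moved towards Closing and the message is mirrored under the
 * device's error/ node in the store:
 *
 *	err = setup_rings(dev);
 *	if (err) {
 *		xenbus_dev_fatal(dev, err, "setting up ring buffers");
 *		return err;
 *	}
 */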

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Returns the grant reference on success, or -errno on error.  On error,
 * the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
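
/*
 * Example (illustrative sketch for a frontend; "sring" is a hypothetical
 * shared-ring page allocated with __get_free_page()): grant the backend
 * access to the ring page and keep the returned reference so it can be
 * advertised in the store:
 *
 *	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
 *	if (err < 0)
 *		goto fail;
 *	ring_ref = err;
 */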

/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Returns 0 on success, or -errno on error.
 * On error, the device will switch to XenbusStateClosing, and the error
 * will be saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
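
/*
 * Example (illustrative): allocate an unbound channel for the peer, then
 * advertise the local port in the store so the peer can bind to it:
 *
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		goto fail;
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "event-channel", "%d",
 *			    evtchn);
 */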

/**
 * Bind to an existing interdomain event channel in another domain.  Returns
 * 0 on success and stores the local port in *port.  On error, returns
 * -errno, switches the device to XenbusStateClosing, and saves the error in
 * XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);

/**
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
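
/*
 * Example (illustrative): a setup error path frees a port that was
 * allocated with xenbus_alloc_evtchn() above but never bound to an irq:
 *
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		return err;
 *	...
 * fail:
 *	xenbus_free_evtchn(dev, evtchn);
 */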

/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps
 * the page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = xen_alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xen_free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the grant handle in an unused field, so that
	   xenbus_unmap_ring_vfree() can recover it later. */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
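
/*
 * Example (illustrative sketch for a backend; "ring_ref" would have been
 * read from the frontend's store entries): map the frontend's ring page
 * into this domain's address space:
 *
 *	void *addr;
 *	err = xenbus_map_ring_valloc(dev, ring_ref, &addr);
 *	if (err)
 *		return err;
 *	... use "addr" as the shared ring until xenbus_unmap_ring_vfree() ...
 */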

/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in a page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags     = GNTMAP_host_map,
		.ref       = gnt_ref,
		.dom       = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another
 * domain.  Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		xen_free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
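
/*
 * Example (illustrative): the backend teardown path mirrors the mapping
 * example above; the grant handle was stashed in the vm_struct by
 * xenbus_map_ring_valloc(), so only the virtual address is needed:
 *
 *	err = xenbus_unmap_ring_vfree(dev, addr);
 *	if (err)
 *		... the error was already reported via xenbus_dev_error() ...
 */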

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another
 * domain.  Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle    = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
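
/*
 * Example (illustrative): check the peer's state while waiting for a
 * transition; dev->otherend is the root of the peer's store directory:
 *
 *	enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
 *	if (state != XenbusStateConnected)
 *		... not ready yet ...
 */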