/*
 * QEMU Xen backend support: operations for the real Xen libraries
 * (libxenevtchn, libxengnttab, libxenforeignmemory and libxenstore).
 */
#include "qemu/osdep.h"
#include "qemu/uuid.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"
#include "qapi/error.h"

#include "hw/xen/xen_native.h"
#include "hw/xen/xen_backend_ops.h"

/*
 * We do not want the xenctrl compat wrappers for the evtchn, gnttab and
 * foreign-mapping APIs, whatever the user-supplied CFLAGS say: they must
 * be undefined before including xenctrl.h.
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>
#include <xenstore.h>

/*
 * Xen releases before 4.7.1 do not ship the split libxenevtchn,
 * libxengnttab and libxenforeignmemory libraries; map the modern names
 * onto the equivalent libxenctrl interfaces instead.
 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701

typedef xc_evtchn xenevtchn_handle;
typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;

#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)

typedef xc_gnttab xengnttab_handle;

#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

typedef xc_interface xenforeignmemory_handle;

#else

#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>

#endif

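/*
 * Fallback grant-copy implementation for gnttab libraries that lack
 * xengnttab_grant_copy(): map all of the remote grant references in one
 * call and memcpy() each segment to or from the mapped pages.
 */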
static int libxengnttab_fallback_grant_copy(xengnttab_handle *xgt,
                                            bool to_domain, uint32_t domid,
                                            XenGrantCopySegment segs[],
                                            unsigned int nr_segs, Error **errp)
{
    uint32_t *refs = g_new(uint32_t, nr_segs);
    int prot = to_domain ? PROT_WRITE : PROT_READ;
    void *map;
    unsigned int i;
    int rc = 0;

    for (i = 0; i < nr_segs; i++) {
        XenGrantCopySegment *seg = &segs[i];

        refs[i] = to_domain ? seg->dest.foreign.ref :
                              seg->source.foreign.ref;
    }

    map = xengnttab_map_domain_grant_refs(xgt, nr_segs, domid, refs, prot);
    if (!map) {
        if (errp) {
            error_setg_errno(errp, errno,
                             "xengnttab_map_domain_grant_refs failed");
        }
        rc = -errno;
        goto done;
    }

    for (i = 0; i < nr_segs; i++) {
        XenGrantCopySegment *seg = &segs[i];
        void *page = map + (i * XEN_PAGE_SIZE);

        if (to_domain) {
            memcpy(page + seg->dest.foreign.offset, seg->source.virt,
                   seg->len);
        } else {
            memcpy(seg->dest.virt, page + seg->source.foreign.offset,
                   seg->len);
        }
    }

    if (xengnttab_unmap(xgt, map, nr_segs)) {
        if (errp) {
            error_setg_errno(errp, errno, "xengnttab_unmap failed");
        }
        rc = -errno;
    }

done:
    g_free(refs);
    return rc;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800

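/*
 * Native grant copy, available from Xen 4.8.0 onwards: translate the
 * generic XenGrantCopySegment array into xengnttab_grant_copy_segment_t
 * and let the gnttab library perform the copy directly.
 */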
static int libxengnttab_backend_grant_copy(xengnttab_handle *xgt,
                                           bool to_domain, uint32_t domid,
                                           XenGrantCopySegment *segs,
                                           unsigned int nr_segs, Error **errp)
{
    xengnttab_grant_copy_segment_t *xengnttab_segs;
    unsigned int i;
    int rc;

    xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs);

    for (i = 0; i < nr_segs; i++) {
        XenGrantCopySegment *seg = &segs[i];
        xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i];

        if (to_domain) {
            xengnttab_seg->flags = GNTCOPY_dest_gref;
            xengnttab_seg->dest.foreign.domid = domid;
            xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref;
            xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset;
            xengnttab_seg->source.virt = seg->source.virt;
        } else {
            xengnttab_seg->flags = GNTCOPY_source_gref;
            xengnttab_seg->source.foreign.domid = domid;
            xengnttab_seg->source.foreign.ref = seg->source.foreign.ref;
            xengnttab_seg->source.foreign.offset =
                seg->source.foreign.offset;
            xengnttab_seg->dest.virt = seg->dest.virt;
        }

        xengnttab_seg->len = seg->len;
    }

    if (xengnttab_grant_copy(xgt, nr_segs, xengnttab_segs)) {
        if (errp) {
            error_setg_errno(errp, errno, "xengnttab_grant_copy failed");
        }
        rc = -errno;
        goto done;
    }

    rc = 0;
    for (i = 0; i < nr_segs; i++) {
        xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i];

        if (xengnttab_seg->status != GNTST_okay) {
            if (errp) {
                error_setg(errp, "xengnttab_grant_copy seg[%u] failed", i);
            }
            rc = -EIO;
            break;
        }
    }

done:
    g_free(xengnttab_segs);
    return rc;
}
#endif

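/*
 * The ->open hooks take no arguments, so wrap xenevtchn_open() (and,
 * further down, xengnttab_open()) with the default logger and flags.
 */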
static xenevtchn_handle *libxenevtchn_backend_open(void)
{
    return xenevtchn_open(NULL, 0);
}

struct evtchn_backend_ops libxenevtchn_backend_ops = {
    .open = libxenevtchn_backend_open,
    .close = xenevtchn_close,
    .bind_interdomain = xenevtchn_bind_interdomain,
    .unbind = xenevtchn_unbind,
    .get_fd = xenevtchn_fd,
    .notify = xenevtchn_notify,
    .unmask = xenevtchn_unmask,
    .pending = xenevtchn_pending,
};

static xengnttab_handle *libxengnttab_backend_open(void)
{
    return xengnttab_open(NULL, 0);
}

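/*
 * The generic ->unmap hook also passes the grant references, but the
 * gnttab library unmaps purely by start address and page count, so
 * @refs is unused here.
 */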
static int libxengnttab_backend_unmap(xengnttab_handle *xgt,
                                      void *start_address, uint32_t *refs,
                                      uint32_t count)
{
    return xengnttab_unmap(xgt, start_address, count);
}

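/*
 * grant_copy defaults to the map-and-memcpy fallback;
 * setup_xen_backend_ops() switches it to the native implementation when
 * the gnttab library supports it.
 */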
static struct gnttab_backend_ops libxengnttab_backend_ops = {
    .features = XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE,
    .open = libxengnttab_backend_open,
    .close = xengnttab_close,
    .grant_copy = libxengnttab_fallback_grant_copy,
    .set_max_grants = xengnttab_set_max_grants,
    .map_refs = xengnttab_map_domain_grant_refs,
    .unmap = libxengnttab_backend_unmap,
};

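/*
 * Foreign memory mappings: before Xen 4.7.1 they go through libxenctrl
 * (xc_map_foreign_*), afterwards through libxenforeignmemory.
 */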
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701

static void *libxenforeignmem_backend_map(uint32_t dom, void *addr, int prot,
                                          size_t pages, xen_pfn_t *pfns,
                                          int *errs)
{
    if (errs) {
        return xc_map_foreign_bulk(xen_xc, dom, prot, pfns, errs, pages);
    } else {
        return xc_map_foreign_pages(xen_xc, dom, prot, pfns, pages);
    }
}

static int libxenforeignmem_backend_unmap(void *addr, size_t pages)
{
    return munmap(addr, pages * XC_PAGE_SIZE);
}

#else

static void *libxenforeignmem_backend_map(uint32_t dom, void *addr, int prot,
                                          size_t pages, xen_pfn_t *pfns,
                                          int *errs)
{
    return xenforeignmemory_map2(xen_fmem, dom, addr, prot, 0, pages, pfns,
                                 errs);
}

static int libxenforeignmem_backend_unmap(void *addr, size_t pages)
{
    return xenforeignmemory_unmap(xen_fmem, addr, pages);
}

#endif

struct foreignmem_backend_ops libxenforeignmem_backend_ops = {
    .map = libxenforeignmem_backend_map,
    .unmap = libxenforeignmem_backend_unmap,
};

struct qemu_xs_handle {
    struct xs_handle *xsh;
    NotifierList notifiers;
};

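/*
 * Called from the main loop whenever the xenstore fd is readable: drain
 * every pending watch event and hand it to the registered notifiers.
 */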
static void watch_event(void *opaque)
{
    struct qemu_xs_handle *h = opaque;

    for (;;) {
        char **v = xs_check_watch(h->xsh);

        if (!v) {
            break;
        }

        notifier_list_notify(&h->notifiers, v);
        free(v);
    }
}

static struct qemu_xs_handle *libxenstore_open(void)
{
    struct xs_handle *xsh = xs_open(0);
    struct qemu_xs_handle *h;

    if (!xsh) {
        return NULL;
    }

    h = g_new0(struct qemu_xs_handle, 1);
    h->xsh = xsh;

    notifier_list_init(&h->notifiers);
    qemu_set_fd_handler(xs_fileno(h->xsh), watch_event, NULL, h);

    return h;
}

static void libxenstore_close(struct qemu_xs_handle *h)
{
    g_assert(notifier_list_empty(&h->notifiers));
    qemu_set_fd_handler(xs_fileno(h->xsh), NULL, NULL, NULL);
    xs_close(h->xsh);
    g_free(h);
}

static char *libxenstore_get_domain_path(struct qemu_xs_handle *h,
                                         unsigned int domid)
{
    return xs_get_domain_path(h->xsh, domid);
}

static char **libxenstore_directory(struct qemu_xs_handle *h,
                                    xs_transaction_t t, const char *path,
                                    unsigned int *num)
{
    return xs_directory(h->xsh, t, path, num);
}

static void *libxenstore_read(struct qemu_xs_handle *h, xs_transaction_t t,
                              const char *path, unsigned int *len)
{
    return xs_read(h->xsh, t, path, len);
}

static bool libxenstore_write(struct qemu_xs_handle *h, xs_transaction_t t,
                              const char *path, const void *data,
                              unsigned int len)
{
    return xs_write(h->xsh, t, path, data, len);
}

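/*
 * Create a node and restrict access to it: the first permission entry
 * names the owning domain (and denies access to everyone else by
 * default), the second grants @domid the requested permissions.
 */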
static bool libxenstore_create(struct qemu_xs_handle *h, xs_transaction_t t,
                               unsigned int owner, unsigned int domid,
                               unsigned int perms, const char *path)
{
    struct xs_permissions perms_list[] = {
        {
            .id = owner,
            .perms = XS_PERM_NONE,
        },
        {
            .id = domid,
            .perms = perms,
        },
    };

    if (!xs_mkdir(h->xsh, t, path)) {
        return false;
    }

    return xs_set_permissions(h->xsh, t, path, perms_list,
                              ARRAY_SIZE(perms_list));
}

static bool libxenstore_destroy(struct qemu_xs_handle *h, xs_transaction_t t,
                                const char *path)
{
    return xs_rm(h->xsh, t, path);
}

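/*
 * Each watch is registered with a unique UUID token; watch_notify() uses
 * that token to match incoming events to the right qemu_xs_watch and then
 * invokes its callback with the path that fired.
 */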
struct qemu_xs_watch {
    char *path;
    char *token;
    xs_watch_fn fn;
    void *opaque;
    Notifier notifier;
};

static void watch_notify(Notifier *n, void *data)
{
    struct qemu_xs_watch *w = container_of(n, struct qemu_xs_watch, notifier);
    const char **v = data;

    if (!strcmp(w->token, v[XS_WATCH_TOKEN])) {
        w->fn(w->opaque, v[XS_WATCH_PATH]);
    }
}

static struct qemu_xs_watch *new_watch(const char *path, xs_watch_fn fn,
                                       void *opaque)
{
    struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1);
    QemuUUID uuid;

    qemu_uuid_generate(&uuid);

    w->token = qemu_uuid_unparse_strdup(&uuid);
    w->path = g_strdup(path);
    w->fn = fn;
    w->opaque = opaque;
    w->notifier.notify = watch_notify;

    return w;
}

static void free_watch(struct qemu_xs_watch *w)
{
    g_free(w->token);
    g_free(w->path);

    g_free(w);
}

static struct qemu_xs_watch *libxenstore_watch(struct qemu_xs_handle *h,
                                               const char *path, xs_watch_fn fn,
                                               void *opaque)
{
    struct qemu_xs_watch *w = new_watch(path, fn, opaque);

    notifier_list_add(&h->notifiers, &w->notifier);

    if (!xs_watch(h->xsh, path, w->token)) {
        notifier_remove(&w->notifier);
        free_watch(w);
        return NULL;
    }

    return w;
}

static void libxenstore_unwatch(struct qemu_xs_handle *h,
                                struct qemu_xs_watch *w)
{
    xs_unwatch(h->xsh, w->path, w->token);
    notifier_remove(&w->notifier);
    free_watch(w);
}

static xs_transaction_t libxenstore_transaction_start(struct qemu_xs_handle *h)
{
    return xs_transaction_start(h->xsh);
}

static bool libxenstore_transaction_end(struct qemu_xs_handle *h,
                                        xs_transaction_t t, bool abort)
{
    return xs_transaction_end(h->xsh, t, abort);
}

struct xenstore_backend_ops libxenstore_backend_ops = {
    .open = libxenstore_open,
    .close = libxenstore_close,
    .get_domain_path = libxenstore_get_domain_path,
    .directory = libxenstore_directory,
    .read = libxenstore_read,
    .write = libxenstore_write,
    .create = libxenstore_create,
    .destroy = libxenstore_destroy,
    .watch = libxenstore_watch,
    .unwatch = libxenstore_unwatch,
    .transaction_start = libxenstore_transaction_start,
    .transaction_end = libxenstore_transaction_end,
};

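/*
 * Probe at runtime whether the gnttab library implements grant copy (a
 * zero-length copy is expected to succeed only when the operation is
 * supported) and, if so, use the native implementation; then publish the
 * backend operation tables.
 */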
void setup_xen_backend_ops(void)
{
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800
    xengnttab_handle *xgt = xengnttab_open(NULL, 0);

    if (xgt) {
        if (xengnttab_grant_copy(xgt, 0, NULL) == 0) {
            libxengnttab_backend_ops.grant_copy =
                libxengnttab_backend_grant_copy;
        }
        xengnttab_close(xgt);
    }
#endif
    xen_evtchn_ops = &libxenevtchn_backend_ops;
    xen_gnttab_ops = &libxengnttab_backend_ops;
    xen_foreignmem_ops = &libxenforeignmem_backend_ops;
    xen_xenstore_ops = &libxenstore_backend_ops;
}