linux/drivers/xen/xenbus/xenbus_client.c
/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus_probe.h"

struct xenbus_map_node {
        struct list_head next;
        union {
                struct vm_struct *area; /* PV */
                struct page *page;      /* HVM */
        };
        grant_handle_t handle;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
        int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
        int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
        static const char *const name[] = {
                [XenbusStateUnknown]       = "Unknown",
                [XenbusStateInitialising]  = "Initialising",
                [XenbusStateInitWait]      = "InitWait",
                [XenbusStateInitialised]   = "Initialised",
                [XenbusStateConnected]     = "Connected",
                [XenbusStateClosing]       = "Closing",
                [XenbusStateClosed]        = "Closed",
                [XenbusStateReconfiguring] = "Reconfiguring",
                [XenbusStateReconfigured]  = "Reconfigured",
        };
        return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
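
/*
 * Example (illustrative only, not part of this file): logging a peer state
 * change from a driver's otherend_changed hook.  "frontend_changed" and its
 * caller are hypothetical.
 *
 *      static void frontend_changed(struct xenbus_device *dev,
 *                                   enum xenbus_state frontend_state)
 *      {
 *              dev_dbg(&dev->dev, "frontend: %s\n",
 *                      xenbus_strstate(frontend_state));
 *      }
 */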

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
                      struct xenbus_watch *watch,
                      void (*callback)(struct xenbus_watch *,
                                       const char **, unsigned int))
{
        int err;

        watch->node = path;
        watch->callback = callback;

        err = register_xenbus_watch(watch);

        if (err) {
                watch->node = NULL;
                watch->callback = NULL;
                xenbus_dev_fatal(dev, err, "adding watch on %s", path);
        }

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
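
/*
 * Example (illustrative only): registering a watch on the peer's state
 * node.  "info" and "my_watch_cb" are hypothetical; "path" must stay valid
 * for the lifetime of the watch.
 *
 *      err = xenbus_watch_path(dev, path, &info->watch, my_watch_cb);
 *      if (err)
 *              return err;
 */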

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt and its arguments, using
 * the given xenbus_watch structure for storage, and the given @callback
 * function as the callback.  Return 0 on success, or -errno on error.  On
 * success, the watched path will be saved as @watch->node, and becomes the
 * caller's to kfree().  On error, @watch->node will be NULL, so the caller
 * has nothing to free, the device will switch to %XenbusStateClosing, and
 * the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
                         struct xenbus_watch *watch,
                         void (*callback)(struct xenbus_watch *,
                                          const char **, unsigned int),
                         const char *pathfmt, ...)
{
        int err;
        va_list ap;
        char *path;

        va_start(ap, pathfmt);
        path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
        va_end(ap);

        if (!path) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
                return -ENOMEM;
        }
        err = xenbus_watch_path(dev, path, watch, callback);

        if (err)
                kfree(path);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
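
/*
 * Example (illustrative only): watching a per-device node in the peer's
 * area.  "info" and "my_watch_cb" are hypothetical; the watch structure
 * must stay alive until unregister_xenbus_watch() is called.
 *
 *      err = xenbus_watch_pathfmt(dev, &info->watch, my_watch_cb,
 *                                 "%s/state", dev->otherend);
 */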

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
                                const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
                      enum xenbus_state state, int depth)
{
        /* We check whether the state is currently set to the given value, and
           if not, then the state is set.  We don't want to unconditionally
           write the given state, because we don't want to fire watches
           unnecessarily.  Furthermore, if the node has gone, we don't write
           to it, as the device will be tearing down, and we don't want to
           resurrect that directory.

           Note that, because of this cached value of our state, this
           function will not take a caller's Xenstore transaction
           (something it attempted in the past) because dev->state
           would not get reset if the transaction was aborted.
         */

        struct xenbus_transaction xbt;
        int current_state;
        int err, abort;

        if (state == dev->state)
                return 0;

again:
        abort = 1;

        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "starting transaction");
                return 0;
        }

        err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
        if (err != 1)
                goto abort;

        err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "writing new state");
                goto abort;
        }

        abort = 0;
abort:
        err = xenbus_transaction_end(xbt, abort);
        if (err) {
                if (err == -EAGAIN && !abort)
                        goto again;
                xenbus_switch_fatal(dev, depth, err, "ending transaction");
        } else
                dev->state = state;

        return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
        return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
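
/*
 * Example (illustrative only): advertising readiness once a frontend has
 * finished connecting its rings.  On failure, xenbus_switch_state() has
 * already reported the error and begun closing the device.
 *
 *      err = xenbus_switch_state(dev, XenbusStateConnected);
 *      if (err)
 *              return err;
 */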

int xenbus_frontend_closed(struct xenbus_device *dev)
{
        xenbus_switch_state(dev, XenbusStateClosed);
        complete(&dev->down);
        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/*
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
        return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
                                const char *fmt, va_list ap)
{
        int ret;
        unsigned int len;
        char *printf_buffer = NULL;
        char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
        printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
        if (printf_buffer == NULL)
                goto fail;

        len = sprintf(printf_buffer, "%i ", -err);
        ret = vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

        BUG_ON(len + ret > PRINTF_BUFFER_SIZE - 1);

        dev_err(&dev->dev, "%s\n", printf_buffer);

        path_buffer = error_path(dev);

        if (path_buffer == NULL) {
                dev_err(&dev->dev, "failed to allocate error path for %s (%s)\n",
                        dev->nodename, printf_buffer);
                goto fail;
        }

        if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
                dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
                        dev->nodename, printf_buffer);
                goto fail;
        }

fail:
        kfree(printf_buffer);
        kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
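
/*
 * Example (illustrative only): failing a probe step.  "setup_rings" is a
 * hypothetical helper; after xenbus_dev_fatal() the device is already on
 * its way to XenbusStateClosing.
 *
 *      err = setup_rings(dev);
 *      if (err) {
 *              xenbus_dev_fatal(dev, err, "setting up ring buffers");
 *              return err;
 *      }
 */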

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but avoids
 * recursing within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
                                const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        if (!depth)
                __xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Return a grant reference (a non-negative int) on success, or -errno on
 * error.  On error, the device will switch to XenbusStateClosing, and the
 * error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
        int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
        if (err < 0)
                xenbus_dev_fatal(dev, err, "granting access to ring page");
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
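
/*
 * Example (illustrative only): granting the peer access to a shared ring.
 * "sring" is a hypothetical page-aligned ring page; the returned reference
 * is then advertised in the store for the peer to map.
 *
 *      int ref = xenbus_grant_ring(dev, virt_to_mfn(sring));
 *      if (ref < 0)
 *              return ref;
 *      err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%d", ref);
 */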

/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = dev->otherend_id;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);
        if (err)
                xenbus_dev_fatal(dev, err, "allocating event channel");
        else
                *port = alloc_unbound.port;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
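
/*
 * Example (illustrative only): allocating an event channel and publishing
 * its port for the peer to bind to.
 *
 *      int evtchn;
 *      err = xenbus_alloc_evtchn(dev, &evtchn);
 *      if (err)
 *              return err;
 *      err = xenbus_printf(XBT_NIL, dev->nodename,
 *                          "event-channel", "%d", evtchn);
 */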

/**
 * Bind to an existing interdomain event channel in another domain. Returns 0
 * on success and stores the local port in *port. On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom = dev->otherend_id;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);
        if (err)
                xenbus_dev_fatal(dev, err,
                                 "binding to event channel %d from domain %d",
                                 remote_port, dev->otherend_id);
        else
                *port = bind_interdomain.local_port;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
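
/*
 * Example (illustrative only): a backend binding to the port its frontend
 * advertised.  "remote_port" would typically be read from the store with
 * xenbus_scanf().
 *
 *      int local_port;
 *      err = xenbus_bind_evtchn(dev, remote_port, &local_port);
 *      if (err)
 *              return err;
 */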

/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
        struct evtchn_close close;
        int err;

        close.port = port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
        if (err)
                xenbus_dev_error(dev, err, "freeing event channel %d", port);

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
        return ring_ops->map(dev, gnt_ref, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
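
/*
 * Example (illustrative only): a backend mapping the ring page its frontend
 * granted.  "ring_ref" would have been read from the store beforehand.
 *
 *      void *ring;
 *      err = xenbus_map_ring_valloc(dev, ring_ref, &ring);
 *      if (err)
 *              return err;
 */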

static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
                                     int gnt_ref, void **vaddr)
{
        struct gnttab_map_grant_ref op = {
                .flags = GNTMAP_host_map | GNTMAP_contains_pte,
                .ref   = gnt_ref,
                .dom   = dev->otherend_id,
        };
        struct xenbus_map_node *node;
        struct vm_struct *area;
        pte_t *pte;

        *vaddr = NULL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        area = alloc_vm_area(PAGE_SIZE, &pte);
        if (!area) {
                kfree(node);
                return -ENOMEM;
        }

        op.host_addr = arbitrary_virt_to_machine(pte).maddr;

        gnttab_batch_map(&op, 1);

        if (op.status != GNTST_okay) {
                free_vm_area(area);
                kfree(node);
                xenbus_dev_fatal(dev, op.status,
                                 "mapping in shared page %d from domain %d",
                                 gnt_ref, dev->otherend_id);
                return op.status;
        }

        node->handle = op.handle;
        node->area = area;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = area->addr;
        return 0;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
                                      int gnt_ref, void **vaddr)
{
        struct xenbus_map_node *node;
        int err;
        void *addr;

        *vaddr = NULL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
        if (err)
                goto out_err;

        addr = pfn_to_kaddr(page_to_pfn(node->page));

        err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
        if (err)
                goto out_err_free_ballooned_pages;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = addr;
        return 0;

 out_err_free_ballooned_pages:
        free_xenballooned_pages(1, &node->page);
 out_err:
        kfree(node);
        return err;
}

/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the page to the specified address.
 * Returns 0 on success, or a GNTST_* status (see
 * xen/include/interface/grant_table.h) on error.  If an error is returned,
 * the device will switch to XenbusStateClosing and the error message will be
 * saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
                    grant_handle_t *handle, void *vaddr)
{
        struct gnttab_map_grant_ref op;

        gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
                          dev->otherend_id);

        gnttab_batch_map(&op, 1);

        if (op.status != GNTST_okay)
                xenbus_dev_fatal(dev, op.status,
                                 "mapping in shared page %d from domain %d",
                                 gnt_ref, dev->otherend_id);
        else
                *handle = op.handle;

        return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
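
/*
 * Example (illustrative only): mapping a granted page into an address the
 * caller already owns, e.g. one obtained from alloc_vm_area().  "vaddr" and
 * "ring_ref" are hypothetical.
 *
 *      grant_handle_t handle;
 *      err = xenbus_map_ring(dev, ring_ref, &handle, vaddr);
 *      if (err)
 *              return err;
 */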

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success, or a GNTST_* status (see
 * xen/include/interface/grant_table.h) on error.
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
        return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
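
/*
 * Example (illustrative only): tearing down a mapping made with
 * xenbus_map_ring_valloc() during device disconnect.
 *
 *      err = xenbus_unmap_ring_vfree(dev, ring);
 *      if (err)
 *              dev_warn(&dev->dev, "ring page still mapped\n");
 */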

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
        struct xenbus_map_node *node;
        struct gnttab_unmap_grant_ref op = {
                .host_addr = (unsigned long)vaddr,
        };
        unsigned int level;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                if (node->area->addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = NULL;
 found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        op.handle = node->handle;
        op.host_addr = arbitrary_virt_to_machine(
                lookup_address((unsigned long)vaddr, &level)).maddr;

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();

        if (op.status == GNTST_okay)
                free_vm_area(node->area);
        else
                xenbus_dev_error(dev, op.status,
                                 "unmapping page at handle %d error %d",
                                 node->handle, op.status);

        kfree(node);
        return op.status;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
        int rv;
        struct xenbus_map_node *node;
        void *addr;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                addr = pfn_to_kaddr(page_to_pfn(node->page));
                if (addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = addr = NULL;
 found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        rv = xenbus_unmap_ring(dev, node->handle, addr);

        if (!rv)
                free_xenballooned_pages(1, &node->page);
        else
                WARN(1, "Leaking %p\n", vaddr);

        kfree(node);
        return rv;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success, or a GNTST_* status (see
 * xen/include/interface/grant_table.h) on error.
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
                      grant_handle_t handle, void *vaddr)
{
        struct gnttab_unmap_grant_ref op;

        gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();

        if (op.status != GNTST_okay)
                xenbus_dev_error(dev, op.status,
                                 "unmapping page at handle %d error %d",
                                 handle, op.status);

        return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
        enum xenbus_state result;
        int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
        if (err)
                result = XenbusStateUnknown;

        return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
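
/*
 * Example (illustrative only): checking whether the peer has already
 * connected before blocking on a state change.
 *
 *      if (xenbus_read_driver_state(dev->otherend) == XenbusStateConnected)
 *              return 0;
 */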

static const struct xenbus_ring_ops ring_ops_pv = {
        .map = xenbus_map_ring_valloc_pv,
        .unmap = xenbus_unmap_ring_vfree_pv,
};

static const struct xenbus_ring_ops ring_ops_hvm = {
        .map = xenbus_map_ring_valloc_hvm,
        .unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                ring_ops = &ring_ops_hvm;
        else
                ring_ops = &ring_ops_pv;
}