linux/drivers/rpmsg/virtio_rpmsg_bus.c
/*
 * Virtio-based remote processor messaging bus
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/rpmsg.h>
#include <linux/mutex.h>

/**
 * struct virtproc_info - virtual remote processor state
 * @vdev:	the virtio device
 * @rvq:	rx virtqueue
 * @svq:	tx virtqueue
 * @rbufs:	kernel address of rx buffers
 * @sbufs:	kernel address of tx buffers
 * @num_bufs:	total number of buffers for rx and tx
 * @last_sbuf:	index of last tx buffer used
 * @bufs_dma:	dma base addr of the buffers
 * @tx_lock:	protects svq, sbufs and sleepers, to allow concurrent senders.
 *		sending a message might require waking up a dozing remote
 *		processor, which involves sleeping, hence the mutex.
 * @endpoints:	idr of local endpoints, allows fast retrieval
 * @endpoints_lock: lock of the endpoints set
 * @sendq:	wait queue of sending contexts waiting for a tx buffer
 * @sleepers:	number of senders that are waiting for a tx buffer
 * @ns_ept:	the bus's name service endpoint
 *
 * This structure stores the rpmsg state of a given virtio remote processor
 * device (there might be several virtio proc devices for each physical
 * remote processor).
 */
struct virtproc_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq;
	void *rbufs, *sbufs;
	unsigned int num_bufs;
	int last_sbuf;
	dma_addr_t bufs_dma;
	struct mutex tx_lock;
	struct idr endpoints;
	struct mutex endpoints_lock;
	wait_queue_head_t sendq;
	atomic_t sleepers;
	struct rpmsg_endpoint *ns_ept;
};

/**
 * struct rpmsg_channel_info - internal channel info representation
 * @name: name of service
 * @src: local address
 * @dst: destination address
 */
struct rpmsg_channel_info {
	char name[RPMSG_NAME_SIZE];
	u32 src;
	u32 dst;
};

#define to_rpmsg_channel(d) container_of(d, struct rpmsg_channel, dev)
#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)

/*
 * We're allocating buffers of 512 bytes each for communications. The
 * number of buffers will be computed from the number of buffers supported
 * by the vring, up to a maximum of 512 buffers (256 in each direction).
 *
 * Each buffer will have 16 bytes for the msg header and 496 bytes for
 * the payload.
 *
 * This will utilize a maximum total space of 256KB for the buffers.
 *
 * We might also want to add support for user-provided buffers in time.
 * This will allow bigger buffer size flexibility, and can also be used
 * to achieve zero-copy messaging.
 *
 * Note that these numbers are purely a decision of this driver - we
 * can change this without changing anything in the firmware of the remote
 * processor.
 */
#define MAX_RPMSG_NUM_BUFS	(512)
#define RPMSG_BUF_SIZE		(512)

/*
 * Local addresses are dynamically allocated on-demand.
 * We do not dynamically assign addresses from the low 1024 range,
 * in order to reserve that address range for predefined services.
 */
#define RPMSG_RESERVED_ADDRESSES	(1024)

/* Address 53 is reserved for advertising remote services */
#define RPMSG_NS_ADDR			(53)

/* sysfs show configuration fields */
#define rpmsg_show_attr(field, path, format_string)			\
static ssize_t								\
field##_show(struct device *dev,					\
			struct device_attribute *attr, char *buf)	\
{									\
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);		\
									\
	return sprintf(buf, format_string, rpdev->path);		\
}

/* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */
rpmsg_show_attr(name, id.name, "%s\n");
rpmsg_show_attr(src, src, "0x%x\n");
rpmsg_show_attr(dst, dst, "0x%x\n");
rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n");

/*
 * Unique (and free running) index for rpmsg devices.
 *
 * Yeah, we're not recycling those numbers (yet?); it will be easy
 * to change if/when we want to.
 */
static unsigned int rpmsg_dev_index;

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name);
}

static struct device_attribute rpmsg_dev_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(modalias),
	__ATTR_RO(dst),
	__ATTR_RO(src),
	__ATTR_RO(announce),
	__ATTR_NULL
};
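
/*
 * For illustration (not derived from this file): with the attributes
 * above, each channel registered on this bus typically appears in sysfs
 * as /sys/bus/rpmsg/devices/rpmsgN/, exposing read-only name, src, dst,
 * announce and modalias files, as described in
 * Documentation/ABI/testing/sysfs-bus-rpmsg.
 */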

/* rpmsg devices and drivers are matched using the service name */
static inline int rpmsg_id_match(const struct rpmsg_channel *rpdev,
				  const struct rpmsg_device_id *id)
{
	return strncmp(id->name, rpdev->id.name, RPMSG_NAME_SIZE) == 0;
}

/* match rpmsg channel and rpmsg driver */
static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
	struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv);
	const struct rpmsg_device_id *ids = rpdrv->id_table;
	unsigned int i;

	for (i = 0; ids[i].name[0]; i++)
		if (rpmsg_id_match(rpdev, &ids[i]))
			return 1;

	return 0;
}

static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT,
					rpdev->id.name);
}

/**
 * __ept_release() - deallocate an rpmsg endpoint
 * @kref: the ept's reference count
 *
 * This function deallocates an ept, and is invoked when its @kref refcount
 * drops to zero.
 *
 * Never invoke this function directly!
 */
static void __ept_release(struct kref *kref)
{
	struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
						  refcount);
	/*
	 * At this point no one holds a reference to ept anymore,
	 * so we can directly free it
	 */
	kfree(ept);
}

static inline int rpmsg_virtqueue_add_outbuf(struct virtqueue *vq,
		struct scatterlist *sg, unsigned int num,
		void *data,
		gfp_t gfp)
{
	return __virtqueue_add_sgs(vq, &sg, num, 0, data, gfp, true);
}

static inline int rpmsg_virtqueue_add_inbuf(struct virtqueue *vq,
		struct scatterlist *sg, unsigned int num,
		void *data,
		gfp_t gfp)
{
	return __virtqueue_add_sgs(vq, &sg, 0, num, data, gfp, true);
}

static inline dma_addr_t msg_dma_address(struct virtproc_info *vrp, void *msg)
{
	unsigned long offset = msg - vrp->rbufs;

	return vrp->bufs_dma + offset;
}

static inline void rpmsg_msg_sg_init(struct virtproc_info *vrp,
				     struct scatterlist *sg,
				     void *msg, unsigned int len)
{
	sg_init_table(sg, 1);
	sg_dma_address(sg) = msg_dma_address(vrp, msg);
	sg_dma_len(sg) = len;
}

/* for more info, see below documentation of rpmsg_create_ept() */
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
		struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
		void *priv, u32 addr)
{
	int id_min, id_max, id;
	struct rpmsg_endpoint *ept;
	struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;

	ept = kzalloc(sizeof(*ept), GFP_KERNEL);
	if (!ept) {
		dev_err(dev, "failed to kzalloc a new ept\n");
		return NULL;
	}

	kref_init(&ept->refcount);
	mutex_init(&ept->cb_lock);

	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;

	/* do we need to allocate a local address ? */
	if (addr == RPMSG_ADDR_ANY) {
		id_min = RPMSG_RESERVED_ADDRESSES;
		id_max = 0;
	} else {
		id_min = addr;
		id_max = addr + 1;
	}

	mutex_lock(&vrp->endpoints_lock);

	/* bind the endpoint to an rpmsg address (and allocate one if needed) */
	id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", id);
		goto free_ept;
	}
	ept->addr = id;

	mutex_unlock(&vrp->endpoints_lock);

	return ept;

free_ept:
	mutex_unlock(&vrp->endpoints_lock);
	kref_put(&ept->refcount, __ept_release);
	return NULL;
}

/**
 * rpmsg_create_ept() - create a new rpmsg_endpoint
 * @rpdev: rpmsg channel device
 * @cb: rx callback handler
 * @priv: private data for the driver's use
 * @addr: local rpmsg address to bind with @cb
 *
 * Every rpmsg address in the system is bound to an rx callback (so when
 * inbound messages arrive, they are dispatched by the rpmsg bus using the
 * appropriate callback handler) by means of an rpmsg_endpoint struct.
 *
 * This function allows drivers to create such an endpoint, and by that,
 * bind a callback, and possibly some private data too, to an rpmsg address
 * (either one that is known in advance, or one that will be dynamically
 * assigned for them).
 *
 * Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint
 * is already created for them when they are probed by the rpmsg bus
 * (using the rx callback provided when they registered to the rpmsg bus).
 *
 * So things should just work for simple drivers: they already have an
 * endpoint, their rx callback is bound to their rpmsg address, and when
 * relevant inbound messages arrive (i.e. messages whose dst address
 * equals the src address of their rpmsg channel), the driver's handler
 * is invoked to process them.
 *
 * That said, more complicated drivers might need to allocate
 * additional rpmsg addresses, and bind them to different rx callbacks.
 * To accomplish that, those drivers need to call this function.
 *
 * Drivers should provide their @rpdev channel (so the new endpoint would belong
 * to the same remote processor their channel belongs to), an rx callback
 * function, optional private data (which is provided back when the
 * rx callback is invoked), and an address they want to bind with the
 * callback. If @addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will
 * dynamically assign them an available rpmsg address (drivers should have
 * a very good reason not to always use RPMSG_ADDR_ANY here).
 *
 * Returns a pointer to the endpoint on success, or NULL on error.
 */
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev,
				rpmsg_rx_cb_t cb, void *priv, u32 addr)
{
	return __rpmsg_create_ept(rpdev->vrp, rpdev, cb, priv, addr);
}
EXPORT_SYMBOL(rpmsg_create_ept);
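
/*
 * A rough usage sketch (the my_* names below are hypothetical, not part
 * of this driver): a driver that needs an extra endpoint, besides the one
 * created for it at probe time, could do something like this from its
 * probe callback, and later release it with rpmsg_destroy_ept():
 *
 *	static void my_extra_cb(struct rpmsg_channel *rpdev, void *data,
 *				int len, void *priv, u32 src)
 *	{
 *		dev_info(&rpdev->dev, "%d bytes from 0x%x\n", len, src);
 *	}
 *
 *	static int my_probe(struct rpmsg_channel *rpdev)
 *	{
 *		struct rpmsg_endpoint *ept;
 *
 *		ept = rpmsg_create_ept(rpdev, my_extra_cb, NULL,
 *				       RPMSG_ADDR_ANY);
 *		if (!ept)
 *			return -ENOMEM;
 *
 *		dev_set_drvdata(&rpdev->dev, ept);
 *		return 0;
 *	}
 */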

/**
 * __rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
 * @vrp: virtproc which owns this ept
 * @ept: endpoint to destroy
 *
 * An internal function which destroys an ept without assuming it is
 * bound to an rpmsg channel. This is needed for handling the internal
 * name service endpoint, which isn't bound to an rpmsg channel.
 * See also __rpmsg_create_ept().
 */
static void
__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
{
	/* make sure new inbound messages can't find this ept anymore */
	mutex_lock(&vrp->endpoints_lock);
	idr_remove(&vrp->endpoints, ept->addr);
	mutex_unlock(&vrp->endpoints_lock);

	/* make sure in-flight inbound messages won't invoke cb anymore */
	mutex_lock(&ept->cb_lock);
	ept->cb = NULL;
	mutex_unlock(&ept->cb_lock);

	kref_put(&ept->refcount, __ept_release);
}

/**
 * rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
 * @ept: endpoint to destroy
 *
 * Should be used by drivers to destroy an rpmsg endpoint previously
 * created with rpmsg_create_ept().
 */
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
	__rpmsg_destroy_ept(ept->rpdev->vrp, ept);
}
EXPORT_SYMBOL(rpmsg_destroy_ept);

/*
 * when an rpmsg driver is probed with a channel, we seamlessly create
 * an endpoint for it, binding its rx callback to a unique local rpmsg
 * address.
 *
 * if we need to, we also announce this channel to the remote
 * processor (needed in case the driver is exposing an rpmsg service).
 */
static int rpmsg_dev_probe(struct device *dev)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
	struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
	struct virtproc_info *vrp = rpdev->vrp;
	struct rpmsg_endpoint *ept;
	int err;

	ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, rpdev->src);
	if (!ept) {
		dev_err(dev, "failed to create endpoint\n");
		err = -ENOMEM;
		goto out;
	}

	rpdev->ept = ept;
	rpdev->src = ept->addr;

	err = rpdrv->probe(rpdev);
	if (err) {
		dev_err(dev, "%s: failed: %d\n", __func__, err);
		rpmsg_destroy_ept(ept);
		goto out;
	}

	/* need to tell remote processor's name service about this channel ? */
	if (rpdev->announce &&
			virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
		struct rpmsg_ns_msg nsm;

		strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
		nsm.addr = rpdev->src;
		nsm.flags = RPMSG_NS_CREATE;

		err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
		if (err)
			dev_err(dev, "failed to announce service: %d\n", err);
	}

out:
	return err;
}

static int rpmsg_dev_remove(struct device *dev)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
	struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
	struct virtproc_info *vrp = rpdev->vrp;
	int err = 0;

	/* tell remote processor's name service we're removing this channel */
	if (rpdev->announce &&
			virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
		struct rpmsg_ns_msg nsm;

		strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
		nsm.addr = rpdev->src;
		nsm.flags = RPMSG_NS_DESTROY;

		err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
		if (err)
			dev_err(dev, "failed to announce service: %d\n", err);
	}

	rpdrv->remove(rpdev);

	rpmsg_destroy_ept(rpdev->ept);

	return err;
}

static struct bus_type rpmsg_bus = {
	.name		= "rpmsg",
	.match		= rpmsg_dev_match,
	.dev_attrs	= rpmsg_dev_attrs,
	.uevent		= rpmsg_uevent,
	.probe		= rpmsg_dev_probe,
	.remove		= rpmsg_dev_remove,
};

/**
 * register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
 * @rpdrv: pointer to a struct rpmsg_driver
 *
 * Returns 0 on success, and an appropriate error value on failure.
 */
int register_rpmsg_driver(struct rpmsg_driver *rpdrv)
{
	rpdrv->drv.bus = &rpmsg_bus;
	return driver_register(&rpdrv->drv);
}
EXPORT_SYMBOL(register_rpmsg_driver);

/**
 * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
 * @rpdrv: pointer to a struct rpmsg_driver
 */
void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv)
{
	driver_unregister(&rpdrv->drv);
}
EXPORT_SYMBOL(unregister_rpmsg_driver);
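
/*
 * A minimal driver sketch built on the registration helpers above (all
 * my_* names and the "rpmsg-sample" service name are hypothetical):
 *
 *	static void my_cb(struct rpmsg_channel *rpdev, void *data, int len,
 *			  void *priv, u32 src)
 *	{
 *		dev_info(&rpdev->dev, "received %d bytes from 0x%x\n",
 *			 len, src);
 *	}
 *
 *	static int my_probe(struct rpmsg_channel *rpdev)
 *	{
 *		return rpmsg_send(rpdev, "hello", 6);
 *	}
 *
 *	static void my_remove(struct rpmsg_channel *rpdev)
 *	{
 *	}
 *
 *	static struct rpmsg_device_id my_id_table[] = {
 *		{ .name = "rpmsg-sample" },
 *		{ },
 *	};
 *
 *	static struct rpmsg_driver my_driver = {
 *		.drv.name	= KBUILD_MODNAME,
 *		.drv.owner	= THIS_MODULE,
 *		.id_table	= my_id_table,
 *		.probe		= my_probe,
 *		.callback	= my_cb,
 *		.remove		= my_remove,
 *	};
 *
 *	module_driver(my_driver, register_rpmsg_driver,
 *		      unregister_rpmsg_driver);
 */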

static void rpmsg_release_device(struct device *dev)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	kfree(rpdev);
}

/*
 * match an rpmsg channel with a channel info struct.
 * this is used to make sure we're not creating rpmsg devices for channels
 * that already exist.
 */
static int rpmsg_channel_match(struct device *dev, void *data)
{
	struct rpmsg_channel_info *chinfo = data;
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src)
		return 0;

	if (chinfo->dst != RPMSG_ADDR_ANY && chinfo->dst != rpdev->dst)
		return 0;

	if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE))
		return 0;

	/* found a match ! */
	return 1;
}

/*
 * create an rpmsg channel using its name and address info.
 * this function will be used to create both static and dynamic
 * channels.
 */
static struct rpmsg_channel *rpmsg_create_channel(struct virtproc_info *vrp,
				struct rpmsg_channel_info *chinfo)
{
	struct rpmsg_channel *rpdev;
	struct device *tmp, *dev = &vrp->vdev->dev;
	int ret;

	/* make sure a similar channel doesn't already exist */
	tmp = device_find_child(dev, chinfo, rpmsg_channel_match);
	if (tmp) {
		/* decrement the matched device's refcount back */
		put_device(tmp);
		dev_err(dev, "channel %s:%x:%x already exists\n",
				chinfo->name, chinfo->src, chinfo->dst);
		return NULL;
	}

	rpdev = kzalloc(sizeof(struct rpmsg_channel), GFP_KERNEL);
	if (!rpdev) {
		pr_err("kzalloc failed\n");
		return NULL;
	}

	rpdev->vrp = vrp;
	rpdev->src = chinfo->src;
	rpdev->dst = chinfo->dst;

	/*
	 * rpmsg server channels have a predefined local address (for now),
	 * and their existence needs to be announced remotely
	 */
	rpdev->announce = rpdev->src != RPMSG_ADDR_ANY ? true : false;

	strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);

	/* very simple device indexing plumbing which is enough for now */
	dev_set_name(&rpdev->dev, "rpmsg%d", rpmsg_dev_index++);

	rpdev->dev.parent = &vrp->vdev->dev;
	rpdev->dev.bus = &rpmsg_bus;
	rpdev->dev.release = rpmsg_release_device;

	ret = device_register(&rpdev->dev);
	if (ret) {
		dev_err(dev, "device_register failed: %d\n", ret);
		put_device(&rpdev->dev);
		return NULL;
	}

	return rpdev;
}

/*
 * find an existing channel using its name + address properties,
 * and destroy it
 */
static int rpmsg_destroy_channel(struct virtproc_info *vrp,
					struct rpmsg_channel_info *chinfo)
{
	struct virtio_device *vdev = vrp->vdev;
	struct device *dev;

	dev = device_find_child(&vdev->dev, chinfo, rpmsg_channel_match);
	if (!dev)
		return -EINVAL;

	device_unregister(dev);

	put_device(dev);

	return 0;
}

/* super simple buffer "allocator" that is just enough for now */
static void *get_a_tx_buf(struct virtproc_info *vrp)
{
	unsigned int len;
	void *ret;

	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/*
	 * either pick the next unused tx buffer
	 * (half of our buffers are used for sending messages)
	 */
	if (vrp->last_sbuf < vrp->num_bufs / 2)
		ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
	/* or recycle a used one */
	else
		ret = virtqueue_get_buf(vrp->svq, &len);

	mutex_unlock(&vrp->tx_lock);

	return ret;
}

/**
 * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed
 * @vrp: virtual remote processor state
 *
 * This function is called before a sender is blocked, waiting for
 * a tx buffer to become available.
 *
 * If we already have blocking senders, this function merely increases
 * the "sleepers" reference count, and exits.
 *
 * Otherwise, if this is the first sender to block, we also enable
 * virtio's tx callbacks, so we'd be immediately notified when a tx
 * buffer is consumed (we rely on virtio's tx callback in order
 * to wake up sleeping senders as soon as a tx buffer is used by the
 * remote processor).
 */
static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
{
	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* are we the first sleeping context waiting for tx buffers ? */
	if (atomic_inc_return(&vrp->sleepers) == 1)
		/* enable "tx-complete" interrupts before dozing off */
		virtqueue_enable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}

/**
 * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed
 * @vrp: virtual remote processor state
 *
 * This function is called after a sender, that waited for a tx buffer
 * to become available, is unblocked.
 *
 * If we still have blocking senders, this function merely decreases
 * the "sleepers" reference count, and exits.
 *
 * Otherwise, if there are no more blocking senders, we also disable
 * virtio's tx callbacks, to avoid the overhead incurred with handling
 * those (now redundant) interrupts.
 */
static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
{
	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* are we the last sleeping context waiting for tx buffers ? */
	if (atomic_dec_and_test(&vrp->sleepers))
		/* disable "tx-complete" interrupts */
		virtqueue_disable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}

/**
 * rpmsg_send_offchannel_raw() - send a message across to the remote processor
 * @rpdev: the rpmsg channel
 * @src: source address
 * @dst: destination address
 * @data: payload of message
 * @len: length of payload
 * @wait: indicates whether caller should block in case no TX buffers are
 *	  available
 *
 * This function is the base implementation for all of the rpmsg sending API.
 *
 * It will send @data of length @len to @dst, and say it's from @src. The
 * message will be sent to the remote processor which the @rpdev channel
 * belongs to.
 *
 * The message is sent using one of the TX buffers that are available for
 * communication with this remote processor.
 *
 * If @wait is true, the caller will be blocked until either a TX buffer is
 * available, or 15 seconds elapse (we don't want callers to
 * sleep indefinitely due to misbehaving remote processors), and in that
 * case -ERESTARTSYS is returned. The number '15' itself was picked
 * arbitrarily; there's little point in asking drivers to provide a timeout
 * value themselves.
 *
 * Otherwise, if @wait is false, and there are no TX buffers available,
 * the function will immediately fail, and -ENOMEM will be returned.
 *
 * Normally drivers shouldn't use this function directly; instead, drivers
 * should use the appropriate rpmsg_{try}send{to, _offchannel} API
 * (see include/linux/rpmsg.h).
 *
 * Returns 0 on success and an appropriate error value on failure.
 */
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
					void *data, int len, bool wait)
{
	struct virtproc_info *vrp = rpdev->vrp;
	struct device *dev = &rpdev->dev;
	struct scatterlist sg;
	struct rpmsg_hdr *msg;
	int err;

	/* bcasting isn't allowed */
	if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
		dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
		return -EINVAL;
	}

	/*
	 * We currently use fixed-sized buffers, and therefore the payload
	 * length is limited.
	 *
	 * One of the possible improvements here is either to support
	 * user-provided buffers (and then we can also support zero-copy
	 * messaging), or to improve the buffer allocator, to support
	 * variable-length buffer sizes.
	 */
	if (len > RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr)) {
		dev_err(dev, "message is too big (%d)\n", len);
		return -EMSGSIZE;
	}

	/* grab a buffer */
	msg = get_a_tx_buf(vrp);
	if (!msg && !wait)
		return -ENOMEM;

	/* no free buffer ? wait for one (but bail after 15 seconds) */
	while (!msg) {
		/* enable "tx-complete" interrupts, if not already enabled */
		rpmsg_upref_sleepers(vrp);

		/*
		 * sleep until a free buffer is available or 15 secs elapse.
		 * the timeout period is not configurable because there's
		 * little point in asking drivers to specify that.
		 * if later this happens to be required, it'd be easy to add.
		 */
		err = wait_event_interruptible_timeout(vrp->sendq,
					(msg = get_a_tx_buf(vrp)),
					msecs_to_jiffies(15000));

		/* disable "tx-complete" interrupts if we're the last sleeper */
		rpmsg_downref_sleepers(vrp);

		/* timeout ? */
		if (!err) {
			dev_err(dev, "timeout waiting for a tx buffer\n");
			return -ERESTARTSYS;
		}
	}

	msg->len = len;
	msg->flags = 0;
	msg->src = src;
	msg->dst = dst;
	msg->reserved = 0;
	memcpy(msg->data, data, len);

	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->reserved);
#ifdef DEBUG
	print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
					msg, sizeof(*msg) + msg->len, true);
#endif

	rpmsg_msg_sg_init(vrp, &sg, msg, sizeof(*msg) + len);

	mutex_lock(&vrp->tx_lock);

	/* add message to the remote processor's virtqueue */
	err = rpmsg_virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
	if (err) {
		/*
		 * need to reclaim the buffer here, otherwise it's lost
		 * (memory won't leak, but rpmsg won't use it again for TX).
		 * this will wait for a buffer management overhaul.
		 */
		dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err);
		goto out;
	}

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(vrp->svq);
out:
	mutex_unlock(&vrp->tx_lock);
	return err;
}
EXPORT_SYMBOL(rpmsg_send_offchannel_raw);
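
/*
 * For reference, the public send API in include/linux/rpmsg.h is a set of
 * thin wrappers around rpmsg_send_offchannel_raw(); roughly (see that
 * header for the authoritative definitions):
 *
 *	rpmsg_send(rpdev, data, len)
 *	    == rpmsg_send_offchannel_raw(rpdev, rpdev->src, rpdev->dst,
 *					 data, len, true);
 *	rpmsg_sendto(rpdev, data, len, dst)
 *	    == rpmsg_send_offchannel_raw(rpdev, rpdev->src, dst,
 *					 data, len, true);
 *	rpmsg_trysend(rpdev, data, len)
 *	    == rpmsg_send_offchannel_raw(rpdev, rpdev->src, rpdev->dst,
 *					 data, len, false);
 */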

static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
			     struct rpmsg_hdr *msg, unsigned int len)
{
	struct rpmsg_endpoint *ept;
	struct scatterlist sg;
	int err;

	dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->reserved);
#ifdef DEBUG
	print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
					msg, sizeof(*msg) + msg->len, true);
#endif

	/*
	 * We currently use fixed-sized buffers, so trivially sanitize
	 * the reported payload length.
	 */
	if (len > RPMSG_BUF_SIZE ||
		msg->len > (len - sizeof(struct rpmsg_hdr))) {
		dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
		return -EINVAL;
	}

	/* use the dst addr to fetch the callback of the appropriate user */
	mutex_lock(&vrp->endpoints_lock);

	ept = idr_find(&vrp->endpoints, msg->dst);

	/* let's make sure no one deallocates ept while we use it */
	if (ept)
		kref_get(&ept->refcount);

	mutex_unlock(&vrp->endpoints_lock);

	if (ept) {
		/* make sure ept->cb doesn't go away while we use it */
		mutex_lock(&ept->cb_lock);

		if (ept->cb)
			ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
				msg->src);

		mutex_unlock(&ept->cb_lock);

		/* farewell, ept, we don't need you anymore */
		kref_put(&ept->refcount, __ept_release);
	} else
		dev_warn(dev, "msg received with no recipient\n");

	/* publish the real size of the buffer */
	rpmsg_msg_sg_init(vrp, &sg, msg, RPMSG_BUF_SIZE);

	/* add the buffer back to the remote processor's virtqueue */
	err = rpmsg_virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
		return err;
	}

	return 0;
}

/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
	struct virtproc_info *vrp = rvq->vdev->priv;
	struct device *dev = &rvq->vdev->dev;
	struct rpmsg_hdr *msg;
	unsigned int len, msgs_received = 0;
	int err;

	msg = virtqueue_get_buf(rvq, &len);
	if (!msg) {
		dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
		return;
	}

	while (msg) {
		err = rpmsg_recv_single(vrp, dev, msg, len);
		if (err)
			break;

		msgs_received++;

		msg = virtqueue_get_buf(rvq, &len);
	}

	dev_dbg(dev, "Received %u messages\n", msgs_received);

	/* tell the remote processor we added another available rx buffer */
	if (msgs_received)
		virtqueue_kick(vrp->rvq);
}

/*
 * This is invoked whenever the remote processor completed processing
 * a TX msg we just sent it, and the buffer is put back to the used ring.
 *
 * Normally, though, we suppress this "tx complete" interrupt in order to
 * avoid the incurred overhead.
 */
static void rpmsg_xmit_done(struct virtqueue *svq)
{
	struct virtproc_info *vrp = svq->vdev->priv;

	dev_dbg(&svq->vdev->dev, "%s\n", __func__);

	/* wake up potential senders that are waiting for a tx buffer */
	wake_up_interruptible(&vrp->sendq);
}

/* invoked when a name service announcement arrives */
static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
							void *priv, u32 src)
{
	struct rpmsg_ns_msg *msg = data;
	struct rpmsg_channel *newch;
	struct rpmsg_channel_info chinfo;
	struct virtproc_info *vrp = priv;
	struct device *dev = &vrp->vdev->dev;
	int ret;

	print_hex_dump(KERN_DEBUG, "NS announcement: ",
			DUMP_PREFIX_NONE, 16, 1,
			data, len, true);

	if (len != sizeof(*msg)) {
		dev_err(dev, "malformed ns msg (%d)\n", len);
		return;
	}

	/*
	 * the name service ept does _not_ belong to a real rpmsg channel,
	 * and is handled by the rpmsg bus itself.
	 * for sanity reasons, make sure a valid rpdev has _not_ sneaked
	 * in somehow.
	 */
	if (rpdev) {
		dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
		return;
	}

	/* don't trust the remote processor for null terminating the name */
	msg->name[RPMSG_NAME_SIZE - 1] = '\0';

	dev_info(dev, "%sing channel %s addr 0x%x\n",
			msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
			msg->name, msg->addr);

	strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
	chinfo.src = RPMSG_ADDR_ANY;
	chinfo.dst = msg->addr;

	if (msg->flags & RPMSG_NS_DESTROY) {
		ret = rpmsg_destroy_channel(vrp, &chinfo);
		if (ret)
			dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
	} else {
		newch = rpmsg_create_channel(vrp, &chinfo);
		if (!newch)
			dev_err(dev, "rpmsg_create_channel failed\n");
	}
}
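
/*
 * For reference, the name service announcement handled above is expected
 * to be a struct rpmsg_ns_msg as declared in include/linux/rpmsg.h,
 * roughly laid out as:
 *
 *	struct rpmsg_ns_msg {
 *		char name[RPMSG_NAME_SIZE];
 *		u32 addr;
 *		u32 flags;
 *	} __packed;
 *
 * where @flags carries RPMSG_NS_CREATE or RPMSG_NS_DESTROY and the whole
 * message is sent to the fixed RPMSG_NS_ADDR address.
 */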

static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	static const char * const names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
	int err = 0, i;
	size_t total_buf_space;
	bool notify;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vrp;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* we expect symmetric tx/rx vrings */
	WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
		virtqueue_get_vring_size(vrp->svq));

	/* we need fewer buffers if vrings are small */
	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
	else
		vrp->num_bufs = MAX_RPMSG_NUM_BUFS;

	total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
				     total_buf_space, &vrp->bufs_dma,
				     GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
					(unsigned long long)vrp->bufs_dma);

	/* half of the buffers are dedicated to RX */
	vrp->rbufs = bufs_va;

	/* and half are dedicated to TX */
	vrp->sbufs = bufs_va + total_buf_space / 2;

	/* set up the receive buffers */
	for (i = 0; i < vrp->num_bufs / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

		rpmsg_msg_sg_init(vrp, &sg, cpu_addr, RPMSG_BUF_SIZE);

		err = rpmsg_virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
								GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/*
	 * Prepare to kick but don't notify yet - we can't do this before
	 * device is ready.
	 */
	notify = virtqueue_kick_prepare(vrp->rvq);

	/* From this point on, we can notify and get callbacks. */
	virtio_device_ready(vdev);

	/*
	 * Tell the remote processor it can start sending messages.
	 * This might be concurrent with callbacks, but we are only
	 * doing notify, not a full kick here, so that's ok.
	 */
	if (notify)
		virtqueue_notify(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
			  bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vrp:
	kfree(vrp);
	return err;
}

static int rpmsg_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

static void rpmsg_remove(struct virtio_device *vdev)
{
	struct virtproc_info *vrp = vdev->priv;
	size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
	int ret;

	vdev->config->reset(vdev);

	ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
	if (ret)
		dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret);

	if (vrp->ns_ept)
		__rpmsg_destroy_ept(vrp, vrp->ns_ept);

	idr_destroy(&vrp->endpoints);

	vdev->config->del_vqs(vrp->vdev);

	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
			  vrp->rbufs, vrp->bufs_dma);

	kfree(vrp);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_RPMSG_F_NS,
};

static struct virtio_driver virtio_ipc_driver = {
	.feature_table	= features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name	= KBUILD_MODNAME,
	.driver.owner	= THIS_MODULE,
	.id_table	= id_table,
	.probe		= rpmsg_probe,
	.remove		= rpmsg_remove,
};

static int __init rpmsg_init(void)
{
	int ret;

	ret = bus_register(&rpmsg_bus);
	if (ret) {
		pr_err("failed to register rpmsg bus: %d\n", ret);
		return ret;
	}

	ret = register_virtio_driver(&virtio_ipc_driver);
	if (ret) {
		pr_err("failed to register virtio driver: %d\n", ret);
		bus_unregister(&rpmsg_bus);
	}

	return ret;
}
subsys_initcall(rpmsg_init);

static void __exit rpmsg_fini(void)
{
	unregister_virtio_driver(&virtio_ipc_driver);
	bus_unregister(&rpmsg_bus);
}
module_exit(rpmsg_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio-based remote processor messaging bus");
MODULE_LICENSE("GPL v2");