linux/drivers/rpmsg/virtio_rpmsg_bus.c
   1/*
   2 * Virtio-based remote processor messaging bus
   3 *
   4 * Copyright (C) 2011 Texas Instruments, Inc.
   5 * Copyright (C) 2011 Google, Inc.
   6 *
   7 * Ohad Ben-Cohen <ohad@wizery.com>
   8 * Brian Swetland <swetland@google.com>
   9 *
  10 * This software is licensed under the terms of the GNU General Public
  11 * License version 2, as published by the Free Software Foundation, and
  12 * may be copied, distributed, and modified under those terms.
  13 *
  14 * This program is distributed in the hope that it will be useful,
  15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17 * GNU General Public License for more details.
  18 */
  19
  20#define pr_fmt(fmt) "%s: " fmt, __func__
  21
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/virtio.h>
  25#include <linux/virtio_ids.h>
  26#include <linux/virtio_config.h>
  27#include <linux/scatterlist.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/slab.h>
  30#include <linux/idr.h>
  31#include <linux/jiffies.h>
  32#include <linux/sched.h>
  33#include <linux/wait.h>
  34#include <linux/rpmsg.h>
  35#include <linux/mutex.h>
  36#include <linux/of_device.h>
  37
  38#include "rpmsg_internal.h"
  39
  40/**
  41 * struct virtproc_info - virtual remote processor state
  42 * @vdev:       the virtio device
  43 * @rvq:        rx virtqueue
  44 * @svq:        tx virtqueue
  45 * @rbufs:      kernel address of rx buffers
  46 * @sbufs:      kernel address of tx buffers
  47 * @num_bufs:   total number of buffers for rx and tx
  48 * @buf_size:   size of one rx or tx buffer
  49 * @last_sbuf:  index of last tx buffer used
  50 * @bufs_dma:   dma base addr of the buffers
  51 * @tx_lock:    protects svq, sbufs and sleepers, to allow concurrent senders.
  52 *              sending a message might require waking up a dozing remote
  53 *              processor, which involves sleeping, hence the mutex.
  54 * @endpoints:  idr of local endpoints, allows fast retrieval
  55 * @endpoints_lock: lock of the endpoints set
  56 * @sendq:      wait queue of sending contexts waiting for a tx buffer
  57 * @sleepers:   number of senders that are waiting for a tx buffer
  58 * @ns_ept:     the bus's name service endpoint
  59 *
  60 * This structure stores the rpmsg state of a given virtio remote processor
  61 * device (there might be several virtio proc devices for each physical
  62 * remote processor).
  63 */
  64struct virtproc_info {
  65        struct virtio_device *vdev;
  66        struct virtqueue *rvq, *svq;
  67        void *rbufs, *sbufs;
  68        unsigned int num_bufs;
  69        unsigned int buf_size;
  70        int last_sbuf;
  71        dma_addr_t bufs_dma;
  72        struct mutex tx_lock;
  73        struct idr endpoints;
  74        struct mutex endpoints_lock;
  75        wait_queue_head_t sendq;
  76        atomic_t sleepers;
  77        struct rpmsg_endpoint *ns_ept;
  78};
  79
  80/* The feature bitmap for virtio rpmsg */
  81#define VIRTIO_RPMSG_F_NS       0 /* RP supports name service notifications */
  82
  83/**
  84 * struct rpmsg_hdr - common header for all rpmsg messages
  85 * @src: source address
  86 * @dst: destination address
  87 * @reserved: reserved for future use
  88 * @len: length of payload (in bytes)
  89 * @flags: message flags
  90 * @data: @len bytes of message payload data
  91 *
  92 * Every message sent or received on the rpmsg bus begins with this header.
  93 */
  94struct rpmsg_hdr {
  95        u32 src;
  96        u32 dst;
  97        u32 reserved;
  98        u16 len;
  99        u16 flags;
 100        u8 data[0];
 101} __packed;
 102
 103/**
 104 * struct rpmsg_ns_msg - dynamic name service announcement message
 105 * @name: name of remote service that is published
 106 * @addr: address of remote service that is published
 107 * @flags: indicates whether service is created or destroyed
 108 *
 109 * This message is sent across to publish a new service, or to announce
 110 * its removal. When we receive such a message, an appropriate
 111 * rpmsg channel (i.e. device) is created/destroyed. In turn, the ->probe()
 112 * or ->remove() handler of the appropriate rpmsg driver will be invoked
 113 * (if/as-soon-as one is registered).
 114 */
 115struct rpmsg_ns_msg {
 116        char name[RPMSG_NAME_SIZE];
 117        u32 addr;
 118        u32 flags;
 119} __packed;
 120
 121/**
 122 * enum rpmsg_ns_flags - dynamic name service announcement flags
 123 *
 124 * @RPMSG_NS_CREATE: a new remote service was just created
 125 * @RPMSG_NS_DESTROY: a known remote service was just destroyed
 126 */
 127enum rpmsg_ns_flags {
 128        RPMSG_NS_CREATE         = 0,
 129        RPMSG_NS_DESTROY        = 1,
 130};

    /*
     * For illustration only: filling in a name service announcement.  This is
     * a sketch that mirrors what virtio_rpmsg_announce_create() below does for
     * locally created channels; the service name is made up and nothing in
     * this driver calls the helper.
     */
    static inline void rpmsg_example_fill_ns(struct rpmsg_ns_msg *nsm, u32 addr)
    {
            strncpy(nsm->name, "example-service", RPMSG_NAME_SIZE);
            nsm->addr = addr;               /* endpoint address being announced */
            nsm->flags = RPMSG_NS_CREATE;   /* or RPMSG_NS_DESTROY on removal */
    }
 131
 132/**
 133 * struct virtio_rpmsg_channel - rpmsg channel bound to a virtio remote processor
     * @rpdev: the rpmsg device exposed to the rpmsg core
     * @vrp: the remote processor this channel belongs to
 134 */
 135struct virtio_rpmsg_channel {
 136        struct rpmsg_device rpdev;
 137
 138        struct virtproc_info *vrp;
 139};
 140
 141#define to_virtio_rpmsg_channel(_rpdev) \
 142        container_of(_rpdev, struct virtio_rpmsg_channel, rpdev)
 143
 144/*
 145 * We're allocating buffers of 512 bytes each for communications. The
 146 * number of buffers will be computed from the number of buffers supported
 147 * by the vring, up to a maximum of 512 buffers (256 in each direction).
 148 *
 149 * Each buffer will have 16 bytes for the msg header and 496 bytes for
 150 * the payload.
 151 *
 152 * This will utilize a maximum total space of 256KB for the buffers.
 153 *
 154 * We might also want to add support for user-provided buffers in time.
 155 * This will allow bigger buffer size flexibility, and can also be used
 156 * to achieve zero-copy messaging.
 157 *
 158 * Note that these numbers are purely a decision of this driver - we
 159 * can change this without changing anything in the firmware of the remote
 160 * processor.
 161 */
 162#define MAX_RPMSG_NUM_BUFS      (512)
 163#define MAX_RPMSG_BUF_SIZE      (512)
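
    /*
     * For illustration only: the sizing arithmetic described above, as it is
     * later applied in rpmsg_probe().  These helpers are a sketch and are not
     * used anywhere in the driver.
     */
    static inline unsigned int rpmsg_example_num_bufs(unsigned int vring_size)
    {
            /* one rx and one tx buffer per vring entry, capped at 512 total */
            return min_t(unsigned int, vring_size * 2, MAX_RPMSG_NUM_BUFS);
    }

    static inline size_t rpmsg_example_max_payload(void)
    {
            /* a 512-byte buffer minus the 16-byte header leaves 496 bytes */
            return MAX_RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr);
    }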
 164
 165/*
 166 * Local addresses are dynamically allocated on-demand.
 167 * We do not dynamically assign addresses from the low 1024 range,
 168 * in order to reserve that address range for predefined services.
 169 */
 170#define RPMSG_RESERVED_ADDRESSES        (1024)
 171
 172/* Address 53 is reserved for advertising remote services */
 173#define RPMSG_NS_ADDR                   (53)
 174
 175static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept);
 176static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
 177static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
 178                               u32 dst);
 179static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
 180                                        u32 dst, void *data, int len);
 181static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
 182static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
 183                                  int len, u32 dst);
 184static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
 185                                           u32 dst, void *data, int len);
 186
 187static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
 188        .destroy_ept = virtio_rpmsg_destroy_ept,
 189        .send = virtio_rpmsg_send,
 190        .sendto = virtio_rpmsg_sendto,
 191        .send_offchannel = virtio_rpmsg_send_offchannel,
 192        .trysend = virtio_rpmsg_trysend,
 193        .trysendto = virtio_rpmsg_trysendto,
 194        .trysend_offchannel = virtio_rpmsg_trysend_offchannel,
 195};
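
    /*
     * For context: the rpmsg core dispatches the public send API through the
     * ops table above.  Roughly (simplified; the real code in rpmsg_core.c
     * also validates the endpoint and the ops pointer):
     *
     *        int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
     *        {
     *                return ept->ops->send(ept, data, len);
     *        }
     */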
 196
 197/**
 198 * rpmsg_sg_init - initialize scatterlist according to cpu address location
 199 * @sg: scatterlist to fill
 200 * @cpu_addr: virtual address of the buffer
 201 * @len: buffer length
 202 *
 203 * An internal helper that fills the scatterlist according to where the
 204 * buffer's virtual address lives (vmalloc area or linear kernel mapping).
 205 */
 206static void
 207rpmsg_sg_init(struct scatterlist *sg, void *cpu_addr, unsigned int len)
 208{
 209        if (is_vmalloc_addr(cpu_addr)) {
 210                sg_init_table(sg, 1);
 211                sg_set_page(sg, vmalloc_to_page(cpu_addr), len,
 212                            offset_in_page(cpu_addr));
 213        } else {
 214                WARN_ON(!virt_addr_valid(cpu_addr));
 215                sg_init_one(sg, cpu_addr, len);
 216        }
 217}
 218
 219/**
 220 * __ept_release() - deallocate an rpmsg endpoint
 221 * @kref: the ept's reference count
 222 *
 223 * This function deallocates an ept, and is invoked when its @kref refcount
 224 * drops to zero.
 225 *
 226 * Never invoke this function directly!
 227 */
 228static void __ept_release(struct kref *kref)
 229{
 230        struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
 231                                                  refcount);
 232        /*
 233         * At this point no one holds a reference to ept anymore,
 234         * so we can directly free it
 235         */
 236        kfree(ept);
 237}
 238
 239/* for more info, see the kernel-doc of rpmsg_create_ept() in rpmsg_core.c */
 240static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
 241                                                 struct rpmsg_device *rpdev,
 242                                                 rpmsg_rx_cb_t cb,
 243                                                 void *priv, u32 addr)
 244{
 245        int id_min, id_max, id;
 246        struct rpmsg_endpoint *ept;
 247        struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
 248
 249        ept = kzalloc(sizeof(*ept), GFP_KERNEL);
 250        if (!ept)
 251                return NULL;
 252
 253        kref_init(&ept->refcount);
 254        mutex_init(&ept->cb_lock);
 255
 256        ept->rpdev = rpdev;
 257        ept->cb = cb;
 258        ept->priv = priv;
 259        ept->ops = &virtio_endpoint_ops;
 260
 261        /* do we need to allocate a local address ? */
 262        if (addr == RPMSG_ADDR_ANY) {
 263                id_min = RPMSG_RESERVED_ADDRESSES;
 264                id_max = 0;
 265        } else {
 266                id_min = addr;
 267                id_max = addr + 1;
 268        }
 269
 270        mutex_lock(&vrp->endpoints_lock);
 271
 272        /* bind the endpoint to an rpmsg address (and allocate one if needed) */
 273        id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
 274        if (id < 0) {
 275                dev_err(dev, "idr_alloc failed: %d\n", id);
 276                goto free_ept;
 277        }
 278        ept->addr = id;
 279
 280        mutex_unlock(&vrp->endpoints_lock);
 281
 282        return ept;
 283
 284free_ept:
 285        mutex_unlock(&vrp->endpoints_lock);
 286        kref_put(&ept->refcount, __ept_release);
 287        return NULL;
 288}
 289
 290static struct rpmsg_endpoint *virtio_rpmsg_create_ept(struct rpmsg_device *rpdev,
 291                                                      rpmsg_rx_cb_t cb,
 292                                                      void *priv,
 293                                                      struct rpmsg_channel_info chinfo)
 294{
 295        struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
 296
 297        return __rpmsg_create_ept(vch->vrp, rpdev, cb, priv, chinfo.src);
 298}
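
    /*
     * For illustration only: a hypothetical client driver that wants an extra
     * endpoint on an existing channel reaches the function above through
     * rpdev->ops->create_ept.  The helper below is a sketch of such a call
     * (the callback is the caller's) and is not part of this bus driver.
     */
    static inline struct rpmsg_endpoint *
    rpmsg_example_extra_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb)
    {
            struct rpmsg_channel_info chinfo = {
                    .src = RPMSG_ADDR_ANY, /* let the idr pick an address >= 1024 */
                    .dst = rpdev->dst,
            };

            strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);

            return rpmsg_create_ept(rpdev, cb, NULL, chinfo);
    }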
 299
 300/**
 301 * __rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
 302 * @vrp: virtproc which owns this ept
 303 * @ept: endpoint to destroy
 304 *
 305 * An internal function which destroys an ept without assuming it is
 306 * bound to an rpmsg channel. This is needed for handling the internal
 307 * name service endpoint, which isn't bound to an rpmsg channel.
 308 * See also __rpmsg_create_ept().
 309 */
 310static void
 311__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
 312{
 313        /* make sure new inbound messages can't find this ept anymore */
 314        mutex_lock(&vrp->endpoints_lock);
 315        idr_remove(&vrp->endpoints, ept->addr);
 316        mutex_unlock(&vrp->endpoints_lock);
 317
 318        /* make sure in-flight inbound messages won't invoke cb anymore */
 319        mutex_lock(&ept->cb_lock);
 320        ept->cb = NULL;
 321        mutex_unlock(&ept->cb_lock);
 322
 323        kref_put(&ept->refcount, __ept_release);
 324}
 325
 326static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
 327{
 328        struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(ept->rpdev);
 329
 330        __rpmsg_destroy_ept(vch->vrp, ept);
 331}
 332
 333static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev)
 334{
 335        struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
 336        struct virtproc_info *vrp = vch->vrp;
 337        struct device *dev = &rpdev->dev;
 338        int err = 0;
 339
 340        /* need to tell remote processor's name service about this channel ? */
 341        if (rpdev->announce && rpdev->ept &&
 342            virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
 343                struct rpmsg_ns_msg nsm;
 344
 345                strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
 346                nsm.addr = rpdev->ept->addr;
 347                nsm.flags = RPMSG_NS_CREATE;
 348
 349                err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
 350                if (err)
 351                        dev_err(dev, "failed to announce service %d\n", err);
 352        }
 353
 354        return err;
 355}
 356
 357static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
 358{
 359        struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
 360        struct virtproc_info *vrp = vch->vrp;
 361        struct device *dev = &rpdev->dev;
 362        int err = 0;
 363
 364        /* tell remote processor's name service we're removing this channel */
 365        if (rpdev->announce && rpdev->ept &&
 366            virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
 367                struct rpmsg_ns_msg nsm;
 368
 369                strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
 370                nsm.addr = rpdev->ept->addr;
 371                nsm.flags = RPMSG_NS_DESTROY;
 372
 373                err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
 374                if (err)
 375                        dev_err(dev, "failed to announce service %d\n", err);
 376        }
 377
 378        return err;
 379}
 380
 381static const struct rpmsg_device_ops virtio_rpmsg_ops = {
 382        .create_ept = virtio_rpmsg_create_ept,
 383        .announce_create = virtio_rpmsg_announce_create,
 384        .announce_destroy = virtio_rpmsg_announce_destroy,
 385};
 386
 387static void virtio_rpmsg_release_device(struct device *dev)
 388{
 389        struct rpmsg_device *rpdev = to_rpmsg_device(dev);
 390        struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
 391
 392        kfree(vch);
 393}
 394
 395/*
 396 * create an rpmsg channel using its name and address info.
 397 * this function will be used to create both static and dynamic
 398 * channels.
 399 */
 400static struct rpmsg_device *rpmsg_create_channel(struct virtproc_info *vrp,
 401                                                 struct rpmsg_channel_info *chinfo)
 402{
 403        struct virtio_rpmsg_channel *vch;
 404        struct rpmsg_device *rpdev;
 405        struct device *tmp, *dev = &vrp->vdev->dev;
 406        int ret;
 407
 408        /* make sure a similar channel doesn't already exist */
 409        tmp = rpmsg_find_device(dev, chinfo);
 410        if (tmp) {
 411                /* decrement the matched device's refcount back */
 412                put_device(tmp);
 413                dev_err(dev, "channel %s:%x:%x already exists\n",
 414                                chinfo->name, chinfo->src, chinfo->dst);
 415                return NULL;
 416        }
 417
 418        vch = kzalloc(sizeof(*vch), GFP_KERNEL);
 419        if (!vch)
 420                return NULL;
 421
 422        /* Link the channel to our vrp */
 423        vch->vrp = vrp;
 424
 425        /* Assign public information to the rpmsg_device */
 426        rpdev = &vch->rpdev;
 427        rpdev->src = chinfo->src;
 428        rpdev->dst = chinfo->dst;
 429        rpdev->ops = &virtio_rpmsg_ops;
 430
 431        /*
 432         * rpmsg server channels have a predefined local address (for now),
 433         * and their existence needs to be announced remotely
 434         */
 435        rpdev->announce = rpdev->src != RPMSG_ADDR_ANY;
 436
 437        strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);
 438
 439        rpdev->dev.parent = &vrp->vdev->dev;
 440        rpdev->dev.release = virtio_rpmsg_release_device;
 441        ret = rpmsg_register_device(rpdev);
 442        if (ret)
 443                return NULL;
 444
 445        return rpdev;
 446}
 447
 448/* super simple buffer "allocator" that is just enough for now */
 449static void *get_a_tx_buf(struct virtproc_info *vrp)
 450{
 451        unsigned int len;
 452        void *ret;
 453
 454        /* support multiple concurrent senders */
 455        mutex_lock(&vrp->tx_lock);
 456
 457        /*
 458         * either pick the next unused tx buffer
 459         * (half of our buffers are used for sending messages)
 460         */
 461        if (vrp->last_sbuf < vrp->num_bufs / 2)
 462                ret = vrp->sbufs + vrp->buf_size * vrp->last_sbuf++;
 463        /* or recycle a used one */
 464        else
 465                ret = virtqueue_get_buf(vrp->svq, &len);
 466
 467        mutex_unlock(&vrp->tx_lock);
 468
 469        return ret;
 470}
 471
 472/**
 473 * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed
 474 * @vrp: virtual remote processor state
 475 *
 476 * This function is called before a sender is blocked, waiting for
 477 * a tx buffer to become available.
 478 *
 479 * If we already have blocking senders, this function merely increases
 480 * the "sleepers" reference count, and exits.
 481 *
 482 * Otherwise, if this is the first sender to block, we also enable
 483 * virtio's tx callbacks, so we'd be immediately notified when a tx
 484 * buffer is consumed (we rely on virtio's tx callback in order
 485 * to wake up sleeping senders as soon as a tx buffer is used by the
 486 * remote processor).
 487 */
 488static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
 489{
 490        /* support multiple concurrent senders */
 491        mutex_lock(&vrp->tx_lock);
 492
 493        /* are we the first sleeping context waiting for tx buffers ? */
 494        if (atomic_inc_return(&vrp->sleepers) == 1)
 495                /* enable "tx-complete" interrupts before dozing off */
 496                virtqueue_enable_cb(vrp->svq);
 497
 498        mutex_unlock(&vrp->tx_lock);
 499}
 500
 501/**
 502 * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed
 503 * @vrp: virtual remote processor state
 504 *
 505 * This function is called after a sender, that waited for a tx buffer
 506 * to become available, is unblocked.
 507 *
 508 * If we still have blocking senders, this function merely decreases
 509 * the "sleepers" reference count, and exits.
 510 *
 511 * Otherwise, if there are no more blocking senders, we also disable
 512 * virtio's tx callbacks, to avoid the overhead incurred with handling
 513 * those (now redundant) interrupts.
 514 */
 515static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
 516{
 517        /* support multiple concurrent senders */
 518        mutex_lock(&vrp->tx_lock);
 519
 520        /* are we the last sleeping context waiting for tx buffers ? */
 521        if (atomic_dec_and_test(&vrp->sleepers))
 522                /* disable "tx-complete" interrupts */
 523                virtqueue_disable_cb(vrp->svq);
 524
 525        mutex_unlock(&vrp->tx_lock);
 526}
 527
 528/**
 529 * rpmsg_send_offchannel_raw() - send a message across to the remote processor
 530 * @rpdev: the rpmsg channel
 531 * @src: source address
 532 * @dst: destination address
 533 * @data: payload of message
 534 * @len: length of payload
 535 * @wait: indicates whether caller should block in case no TX buffers available
 536 *
 537 * This function is the base implementation for all of the rpmsg sending API.
 538 *
 539 * It will send @data of length @len to @dst, and say it's from @src. The
 540 * message will be sent to the remote processor which the @rpdev channel
 541 * belongs to.
 542 *
 543 * The message is sent using one of the TX buffers that are available for
 544 * communication with this remote processor.
 545 *
 546 * If @wait is true, the caller will be blocked until either a TX buffer is
 547 * available, or 15 seconds elapses (we don't want callers to
 548 * sleep indefinitely due to misbehaving remote processors), and in that
 549 * case -ERESTARTSYS is returned. The number '15' itself was picked
 550 * arbitrarily; there's little point in asking drivers to provide a timeout
 551 * value themselves.
 552 *
 553 * Otherwise, if @wait is false, and there are no TX buffers available,
 554 * the function will immediately fail, and -ENOMEM will be returned.
 555 *
 556 * Normally drivers shouldn't use this function directly; instead, drivers
 557 * should use the appropriate rpmsg_{try}send{to, _offchannel} API
 558 * (see include/linux/rpmsg.h).
 559 *
 560 * Returns 0 on success and an appropriate error value on failure.
 561 */
 562static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
 563                                     u32 src, u32 dst,
 564                                     void *data, int len, bool wait)
 565{
 566        struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
 567        struct virtproc_info *vrp = vch->vrp;
 568        struct device *dev = &rpdev->dev;
 569        struct scatterlist sg;
 570        struct rpmsg_hdr *msg;
 571        int err;
 572
 573        /* bcasting isn't allowed */
 574        if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
 575                dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
 576                return -EINVAL;
 577        }
 578
 579        /*
 580         * We currently use fixed-sized buffers, and therefore the payload
 581         * length is limited.
 582         *
 583         * One of the possible improvements here is either to support
 584         * user-provided buffers (and then we can also support zero-copy
 585         * messaging), or to improve the buffer allocator, to support
 586         * variable-length buffer sizes.
 587         */
 588        if (len > vrp->buf_size - sizeof(struct rpmsg_hdr)) {
 589                dev_err(dev, "message is too big (%d)\n", len);
 590                return -EMSGSIZE;
 591        }
 592
 593        /* grab a buffer */
 594        msg = get_a_tx_buf(vrp);
 595        if (!msg && !wait)
 596                return -ENOMEM;
 597
 598        /* no free buffer ? wait for one (but bail after 15 seconds) */
 599        while (!msg) {
 600                /* enable "tx-complete" interrupts, if not already enabled */
 601                rpmsg_upref_sleepers(vrp);
 602
 603                /*
 604                 * sleep until a free buffer is available or 15 secs elapse.
 605                 * the timeout period is not configurable because there's
 606                 * little point in asking drivers to specify that.
 607                 * if later this happens to be required, it'd be easy to add.
 608                 */
 609                err = wait_event_interruptible_timeout(vrp->sendq,
 610                                        (msg = get_a_tx_buf(vrp)),
 611                                        msecs_to_jiffies(15000));
 612
 613                /* disable "tx-complete" interrupts if we're the last sleeper */
 614                rpmsg_downref_sleepers(vrp);
 615
 616                /* timeout ? */
 617                if (!err) {
 618                        dev_err(dev, "timeout waiting for a tx buffer\n");
 619                        return -ERESTARTSYS;
 620                }
 621        }
 622
 623        msg->len = len;
 624        msg->flags = 0;
 625        msg->src = src;
 626        msg->dst = dst;
 627        msg->reserved = 0;
 628        memcpy(msg->data, data, len);
 629
 630        dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
 631                msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
 632#if defined(CONFIG_DYNAMIC_DEBUG)
 633        dynamic_hex_dump("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
 634                         msg, sizeof(*msg) + msg->len, true);
 635#endif
 636
 637        rpmsg_sg_init(&sg, msg, sizeof(*msg) + len);
 638
 639        mutex_lock(&vrp->tx_lock);
 640
 641        /* add message to the remote processor's virtqueue */
 642        err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
 643        if (err) {
 644                /*
 645                 * need to reclaim the buffer here, otherwise it's lost
 646                 * (memory won't leak, but rpmsg won't use it again for TX).
 648                 * this will have to wait for a buffer management overhaul.
 648                 */
 649                dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err);
 650                goto out;
 651        }
 652
 653        /* tell the remote processor it has a pending message to read */
 654        virtqueue_kick(vrp->svq);
 655out:
 656        mutex_unlock(&vrp->tx_lock);
 657        return err;
 658}
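
    /*
     * For illustration only: client drivers are expected to use the public
     * API from <linux/rpmsg.h> rather than rpmsg_send_offchannel_raw()
     * directly.  The callback below is a made-up example that echoes every
     * received message back to its sender; it is not part of this driver.
     */
    static int __maybe_unused rpmsg_example_echo_cb(struct rpmsg_device *rpdev,
                                                    void *data, int len,
                                                    void *priv, u32 src)
    {
            /* may block up to 15s for a tx buffer; rpmsg_trysendto() would not */
            return rpmsg_sendto(rpdev->ept, data, len, src);
    }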
 659
 660static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
 661{
 662        struct rpmsg_device *rpdev = ept->rpdev;
 663        u32 src = ept->addr, dst = rpdev->dst;
 664
 665        return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
 666}
 667
 668static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
 669                               u32 dst)
 670{
 671        struct rpmsg_device *rpdev = ept->rpdev;
 672        u32 src = ept->addr;
 673
 674        return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
 675}
 676
 677static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
 678                                        u32 dst, void *data, int len)
 679{
 680        struct rpmsg_device *rpdev = ept->rpdev;
 681
 682        return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
 683}
 684
 685static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
 686{
 687        struct rpmsg_device *rpdev = ept->rpdev;
 688        u32 src = ept->addr, dst = rpdev->dst;
 689
 690        return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
 691}
 692
 693static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
 694                                  int len, u32 dst)
 695{
 696        struct rpmsg_device *rpdev = ept->rpdev;
 697        u32 src = ept->addr;
 698
 699        return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
 700}
 701
 702static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
 703                                           u32 dst, void *data, int len)
 704{
 705        struct rpmsg_device *rpdev = ept->rpdev;
 706
 707        return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
 708}
 709
 710static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
 711                             struct rpmsg_hdr *msg, unsigned int len)
 712{
 713        struct rpmsg_endpoint *ept;
 714        struct scatterlist sg;
 715        int err;
 716
 717        dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
 718                msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
 719#if defined(CONFIG_DYNAMIC_DEBUG)
 720        dynamic_hex_dump("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
 721                         msg, sizeof(*msg) + msg->len, true);
 722#endif
 723
 724        /*
 725         * We currently use fixed-sized buffers, so trivially sanitize
 726         * the reported payload length.
 727         */
 728        if (len > vrp->buf_size ||
 729            msg->len > (len - sizeof(struct rpmsg_hdr))) {
 730                dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
 731                return -EINVAL;
 732        }
 733
 734        /* use the dst addr to fetch the callback of the appropriate user */
 735        mutex_lock(&vrp->endpoints_lock);
 736
 737        ept = idr_find(&vrp->endpoints, msg->dst);
 738
 739        /* let's make sure no one deallocates ept while we use it */
 740        if (ept)
 741                kref_get(&ept->refcount);
 742
 743        mutex_unlock(&vrp->endpoints_lock);
 744
 745        if (ept) {
 746                /* make sure ept->cb doesn't go away while we use it */
 747                mutex_lock(&ept->cb_lock);
 748
 749                if (ept->cb)
 750                        ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
 751                                msg->src);
 752
 753                mutex_unlock(&ept->cb_lock);
 754
 755                /* farewell, ept, we don't need you anymore */
 756                kref_put(&ept->refcount, __ept_release);
 757        } else
 758                dev_warn(dev, "msg received with no recipient\n");
 759
 760        /* publish the real size of the buffer */
 761        rpmsg_sg_init(&sg, msg, vrp->buf_size);
 762
 763        /* add the buffer back to the remote processor's virtqueue */
 764        err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
 765        if (err < 0) {
 766                dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
 767                return err;
 768        }
 769
 770        return 0;
 771}
 772
 773/* called when an rx buffer is used, and it's time to digest a message */
 774static void rpmsg_recv_done(struct virtqueue *rvq)
 775{
 776        struct virtproc_info *vrp = rvq->vdev->priv;
 777        struct device *dev = &rvq->vdev->dev;
 778        struct rpmsg_hdr *msg;
 779        unsigned int len, msgs_received = 0;
 780        int err;
 781
 782        msg = virtqueue_get_buf(rvq, &len);
 783        if (!msg) {
 784                dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
 785                return;
 786        }
 787
 788        while (msg) {
 789                err = rpmsg_recv_single(vrp, dev, msg, len);
 790                if (err)
 791                        break;
 792
 793                msgs_received++;
 794
 795                msg = virtqueue_get_buf(rvq, &len);
 796        }
 797
 798        dev_dbg(dev, "Received %u messages\n", msgs_received);
 799
 800        /* tell the remote processor we added another available rx buffer */
 801        if (msgs_received)
 802                virtqueue_kick(vrp->rvq);
 803}
 804
 805/*
 806 * This is invoked whenever the remote processor has completed processing
 807 * a TX msg we just sent it, and the buffer is put back on the used ring.
 808 *
 809 * Normally, though, we suppress this "tx complete" interrupt in order to
 810 * avoid the incurred overhead.
 811 */
 812static void rpmsg_xmit_done(struct virtqueue *svq)
 813{
 814        struct virtproc_info *vrp = svq->vdev->priv;
 815
 816        dev_dbg(&svq->vdev->dev, "%s\n", __func__);
 817
 818        /* wake up potential senders that are waiting for a tx buffer */
 819        wake_up_interruptible(&vrp->sendq);
 820}
 821
 822/* invoked when a name service announcement arrives */
 823static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
 824                       void *priv, u32 src)
 825{
 826        struct rpmsg_ns_msg *msg = data;
 827        struct rpmsg_device *newch;
 828        struct rpmsg_channel_info chinfo;
 829        struct virtproc_info *vrp = priv;
 830        struct device *dev = &vrp->vdev->dev;
 831        int ret;
 832
 833#if defined(CONFIG_DYNAMIC_DEBUG)
 834        dynamic_hex_dump("NS announcement: ", DUMP_PREFIX_NONE, 16, 1,
 835                         data, len, true);
 836#endif
 837
 838        if (len != sizeof(*msg)) {
 839                dev_err(dev, "malformed ns msg (%d)\n", len);
 840                return -EINVAL;
 841        }
 842
 843        /*
 844         * the name service ept does _not_ belong to a real rpmsg channel,
 845         * and is handled by the rpmsg bus itself.
 846         * for sanity reasons, make sure a valid rpdev has _not_ sneaked
 847         * in somehow.
 848         */
 849        if (rpdev) {
 850                dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
 851                return -EINVAL;
 852        }
 853
 854        /* don't trust the remote processor to null-terminate the name */
 855        msg->name[RPMSG_NAME_SIZE - 1] = '\0';
 856
 857        dev_info(dev, "%sing channel %s addr 0x%x\n",
 858                 msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
 859                 msg->name, msg->addr);
 860
 861        strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
 862        chinfo.src = RPMSG_ADDR_ANY;
 863        chinfo.dst = msg->addr;
 864
 865        if (msg->flags & RPMSG_NS_DESTROY) {
 866                ret = rpmsg_unregister_device(&vrp->vdev->dev, &chinfo);
 867                if (ret)
 868                        dev_err(dev, "rpmsg_unregister_device failed: %d\n", ret);
 869        } else {
 870                newch = rpmsg_create_channel(vrp, &chinfo);
 871                if (!newch)
 872                        dev_err(dev, "rpmsg_create_channel failed\n");
 873        }
 874
 875        return 0;
 876}
 877
 878static int rpmsg_probe(struct virtio_device *vdev)
 879{
 880        vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
 881        static const char * const names[] = { "input", "output" };
 882        struct virtqueue *vqs[2];
 883        struct virtproc_info *vrp;
 884        void *bufs_va;
 885        int err = 0, i;
 886        size_t total_buf_space;
 887        bool notify;
 888
 889        vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
 890        if (!vrp)
 891                return -ENOMEM;
 892
 893        vrp->vdev = vdev;
 894
 895        idr_init(&vrp->endpoints);
 896        mutex_init(&vrp->endpoints_lock);
 897        mutex_init(&vrp->tx_lock);
 898        init_waitqueue_head(&vrp->sendq);
 899
 900        /* We expect two virtqueues, rx and tx (and in this order) */
 901        err = virtio_find_vqs(vdev, 2, vqs, vq_cbs, names, NULL);
 902        if (err)
 903                goto free_vrp;
 904
 905        vrp->rvq = vqs[0];
 906        vrp->svq = vqs[1];
 907
 908        /* we expect symmetric tx/rx vrings */
 909        WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
 910                virtqueue_get_vring_size(vrp->svq));
 911
 912        /* we need fewer buffers if the vrings are small */
 913        if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
 914                vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
 915        else
 916                vrp->num_bufs = MAX_RPMSG_NUM_BUFS;
 917
 918        vrp->buf_size = MAX_RPMSG_BUF_SIZE;
 919
 920        total_buf_space = vrp->num_bufs * vrp->buf_size;
 921
 922        /* allocate coherent memory for the buffers */
 923        bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
 924                                     total_buf_space, &vrp->bufs_dma,
 925                                     GFP_KERNEL);
 926        if (!bufs_va) {
 927                err = -ENOMEM;
 928                goto vqs_del;
 929        }
 930
 931        dev_dbg(&vdev->dev, "buffers: va %p, dma %pad\n",
 932                bufs_va, &vrp->bufs_dma);
 933
 934        /* half of the buffers are dedicated to RX */
 935        vrp->rbufs = bufs_va;
 936
 937        /* and the other half to TX */
 938        vrp->sbufs = bufs_va + total_buf_space / 2;
 939
 940        /* set up the receive buffers */
 941        for (i = 0; i < vrp->num_bufs / 2; i++) {
 942                struct scatterlist sg;
 943                void *cpu_addr = vrp->rbufs + i * vrp->buf_size;
 944
 945                rpmsg_sg_init(&sg, cpu_addr, vrp->buf_size);
 946
 947                err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
 948                                          GFP_KERNEL);
 949                WARN_ON(err); /* sanity check; this can't really happen */
 950        }
 951
 952        /* suppress "tx-complete" interrupts */
 953        virtqueue_disable_cb(vrp->svq);
 954
 955        vdev->priv = vrp;
 956
 957        /* if supported by the remote processor, enable the name service */
 958        if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
 959                /* a dedicated endpoint handles the name service msgs */
 960                vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
 961                                                vrp, RPMSG_NS_ADDR);
 962                if (!vrp->ns_ept) {
 963                        dev_err(&vdev->dev, "failed to create the ns ept\n");
 964                        err = -ENOMEM;
 965                        goto free_coherent;
 966                }
 967        }
 968
 969        /*
 970         * Prepare to kick but don't notify yet - we can't do this before
 971         * the device is ready.
 972         */
 973        notify = virtqueue_kick_prepare(vrp->rvq);
 974
 975        /* From this point on, we can notify and get callbacks. */
 976        virtio_device_ready(vdev);
 977
 978        /* tell the remote processor it can start sending messages */
 979        /*
 980         * this might be concurrent with callbacks, but we are only
 981         * doing notify, not a full kick here, so that's ok.
 982         */
 983        if (notify)
 984                virtqueue_notify(vrp->rvq);
 985
 986        dev_info(&vdev->dev, "rpmsg host is online\n");
 987
 988        return 0;
 989
 990free_coherent:
 991        dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
 992                          bufs_va, vrp->bufs_dma);
 993vqs_del:
 994        vdev->config->del_vqs(vrp->vdev);
 995free_vrp:
 996        kfree(vrp);
 997        return err;
 998}
 999
1000static int rpmsg_remove_device(struct device *dev, void *data)
1001{
1002        device_unregister(dev);
1003
1004        return 0;
1005}
1006
1007static void rpmsg_remove(struct virtio_device *vdev)
1008{
1009        struct virtproc_info *vrp = vdev->priv;
1010        size_t total_buf_space = vrp->num_bufs * vrp->buf_size;
1011        int ret;
1012
1013        vdev->config->reset(vdev);
1014
1015        ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
1016        if (ret)
1017                dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret);
1018
1019        if (vrp->ns_ept)
1020                __rpmsg_destroy_ept(vrp, vrp->ns_ept);
1021
1022        idr_destroy(&vrp->endpoints);
1023
1024        vdev->config->del_vqs(vrp->vdev);
1025
1026        dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
1027                          vrp->rbufs, vrp->bufs_dma);
1028
1029        kfree(vrp);
1030}
1031
1032static struct virtio_device_id id_table[] = {
1033        { VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID },
1034        { 0 },
1035};
1036
1037static unsigned int features[] = {
1038        VIRTIO_RPMSG_F_NS,
1039};
1040
1041static struct virtio_driver virtio_ipc_driver = {
1042        .feature_table  = features,
1043        .feature_table_size = ARRAY_SIZE(features),
1044        .driver.name    = KBUILD_MODNAME,
1045        .driver.owner   = THIS_MODULE,
1046        .id_table       = id_table,
1047        .probe          = rpmsg_probe,
1048        .remove         = rpmsg_remove,
1049};
1050
1051static int __init rpmsg_init(void)
1052{
1053        int ret;
1054
1055        ret = register_virtio_driver(&virtio_ipc_driver);
1056        if (ret)
1057                pr_err("failed to register virtio driver: %d\n", ret);
1058
1059        return ret;
1060}
1061subsys_initcall(rpmsg_init);
1062
1063static void __exit rpmsg_fini(void)
1064{
1065        unregister_virtio_driver(&virtio_ipc_driver);
1066}
1067module_exit(rpmsg_fini);
1068
1069MODULE_DEVICE_TABLE(virtio, id_table);
1070MODULE_DESCRIPTION("Virtio-based remote processor messaging bus");
1071MODULE_LICENSE("GPL v2");
1072