linux/drivers/remoteproc/remoteproc_virtio.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Remote processor messaging transport (OMAP platform-specific bits)
   4 *
   5 * Copyright (C) 2011 Texas Instruments, Inc.
   6 * Copyright (C) 2011 Google, Inc.
   7 *
   8 * Ohad Ben-Cohen <ohad@wizery.com>
   9 * Brian Swetland <swetland@google.com>
  10 */
  11
  12#include <linux/dma-map-ops.h>
  13#include <linux/export.h>
  14#include <linux/of_reserved_mem.h>
  15#include <linux/remoteproc.h>
  16#include <linux/virtio.h>
  17#include <linux/virtio_config.h>
  18#include <linux/virtio_ids.h>
  19#include <linux/virtio_ring.h>
  20#include <linux/err.h>
  21#include <linux/kref.h>
  22#include <linux/slab.h>
  23
  24#include "remoteproc_internal.h"
  25
  26/* kick the remote processor, and let it know which virtqueue to poke at */
  27static bool rproc_virtio_notify(struct virtqueue *vq)
  28{
  29        struct rproc_vring *rvring = vq->priv;
  30        struct rproc *rproc = rvring->rvdev->rproc;
  31        int notifyid = rvring->notifyid;
  32
  33        dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);
  34
  35        rproc->ops->kick(rproc, notifyid);
  36        return true;
  37}
  38
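/*
 * Illustrative sketch, not part of this file: the notify callback above ends
 * up in the platform driver's .kick op. A hypothetical implementation that
 * signals the remote side through a mailbox might look like this (the
 * my_rproc_priv structure and the mailbox payload convention are assumptions,
 * not something this file defines):
 *
 *	static void my_rproc_kick(struct rproc *rproc, int vqid)
 *	{
 *		struct my_rproc_priv *priv = rproc->priv;
 *
 *		mbox_send_message(priv->mbox_chan, (void *)&vqid);
 *	}
 *
 *	static const struct rproc_ops my_rproc_ops = {
 *		.start = my_rproc_start,
 *		.stop  = my_rproc_stop,
 *		.kick  = my_rproc_kick,
 *	};
 */
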
  39/**
  40 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
  41 * @rproc: handle to the remote processor
  42 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
  43 *
  44 * This function should be called by the platform-specific rproc driver,
  45 * when the remote processor signals that a specific virtqueue has pending
  46 * messages available.
  47 *
   48 * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
   49 * IRQ_HANDLED otherwise.
  50 */
  51irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
  52{
  53        struct rproc_vring *rvring;
  54
  55        dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);
  56
  57        rvring = idr_find(&rproc->notifyids, notifyid);
  58        if (!rvring || !rvring->vq)
  59                return IRQ_NONE;
  60
  61        return vring_interrupt(0, rvring->vq);
  62}
  63EXPORT_SYMBOL(rproc_vq_interrupt);
  64
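/*
 * Illustrative sketch, not part of this file: a platform rproc driver would
 * typically call rproc_vq_interrupt() from the interrupt or mailbox-rx
 * handler that fires when the remote processor kicks a virtqueue. Everything
 * named my_* below is hypothetical, and how the notify id is recovered from
 * the interrupt payload is entirely platform specific:
 *
 *	static irqreturn_t my_rproc_vq_isr(int irq, void *data)
 *	{
 *		struct rproc *rproc = data;
 *		int notifyid = my_read_kicked_vq_id();
 *
 *		return rproc_vq_interrupt(rproc, notifyid);
 *	}
 */
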
  65static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
  66                                    unsigned int id,
  67                                    void (*callback)(struct virtqueue *vq),
  68                                    const char *name, bool ctx)
  69{
  70        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
  71        struct rproc *rproc = vdev_to_rproc(vdev);
  72        struct device *dev = &rproc->dev;
  73        struct rproc_mem_entry *mem;
  74        struct rproc_vring *rvring;
  75        struct fw_rsc_vdev *rsc;
  76        struct virtqueue *vq;
  77        void *addr;
  78        int len, size;
  79
  80        /* we're temporarily limited to two virtqueues per rvdev */
  81        if (id >= ARRAY_SIZE(rvdev->vring))
  82                return ERR_PTR(-EINVAL);
  83
  84        if (!name)
  85                return NULL;
  86
  87        /* Search allocated memory region by name */
  88        mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
  89                                          id);
  90        if (!mem || !mem->va)
  91                return ERR_PTR(-ENOMEM);
  92
  93        rvring = &rvdev->vring[id];
  94        addr = mem->va;
  95        len = rvring->len;
  96
  97        /* zero vring */
  98        size = vring_size(len, rvring->align);
  99        memset(addr, 0, size);
 100
 101        dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
 102                id, addr, len, rvring->notifyid);
 103
 104        /*
 105         * Create the new vq, and tell virtio we're not interested in
 106         * the 'weak' smp barriers, since we're talking with a real device.
 107         */
 108        vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
 109                                 addr, rproc_virtio_notify, callback, name);
 110        if (!vq) {
 111                dev_err(dev, "vring_new_virtqueue %s failed\n", name);
 112                rproc_free_vring(rvring);
 113                return ERR_PTR(-ENOMEM);
 114        }
 115
 116        rvring->vq = vq;
 117        vq->priv = rvring;
 118
 119        /* Update vring in resource table */
 120        rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
 121        rsc->vring[id].da = mem->da;
 122
 123        return vq;
 124}
 125
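/*
 * For reference, a hedged sketch of the firmware-side resource table entry
 * that rp_find_vq() consumes: the vring geometry (da, align, num) comes from
 * a struct fw_rsc_vdev published by the firmware, and the host registers the
 * backing carveouts under the names "vdev<index>vring<id>". The values below
 * are purely illustrative, and embedding fw_rsc_vdev this way is a common
 * firmware convention rather than something this file mandates:
 *
 *	struct my_rpmsg_vdev_rsc {
 *		struct fw_rsc_vdev vdev;
 *		struct fw_rsc_vdev_vring vring0;
 *		struct fw_rsc_vdev_vring vring1;
 *	} my_vdev_rsc = {
 *		.vdev = {
 *			.id = VIRTIO_ID_RPMSG,
 *			.dfeatures = 1,
 *			.num_of_vrings = 2,
 *		},
 *		.vring0 = { .da = FW_RSC_ADDR_ANY, .align = 4096, .num = 256 },
 *		.vring1 = { .da = FW_RSC_ADDR_ANY, .align = 4096, .num = 256 },
 *	};
 *
 * With .da set to FW_RSC_ADDR_ANY, the device address chosen for the carveout
 * is written back to rsc->vring[id].da at the end of rp_find_vq() so the
 * remote side can locate the ring.
 */
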
 126static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
 127{
 128        struct virtqueue *vq, *n;
 129        struct rproc_vring *rvring;
 130
 131        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
 132                rvring = vq->priv;
 133                rvring->vq = NULL;
 134                vring_del_virtqueue(vq);
 135        }
 136}
 137
 138static void rproc_virtio_del_vqs(struct virtio_device *vdev)
 139{
 140        __rproc_virtio_del_vqs(vdev);
 141}
 142
 143static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
 144                                 struct virtqueue *vqs[],
 145                                 vq_callback_t *callbacks[],
 146                                 const char * const names[],
 147                                 const bool * ctx,
 148                                 struct irq_affinity *desc)
 149{
 150        int i, ret, queue_idx = 0;
 151
 152        for (i = 0; i < nvqs; ++i) {
 153                if (!names[i]) {
 154                        vqs[i] = NULL;
 155                        continue;
 156                }
 157
 158                vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
 159                                    ctx ? ctx[i] : false);
 160                if (IS_ERR(vqs[i])) {
 161                        ret = PTR_ERR(vqs[i]);
 162                        goto error;
 163                }
 164        }
 165
 166        return 0;
 167
 168error:
 169        __rproc_virtio_del_vqs(vdev);
 170        return ret;
 171}
 172
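/*
 * Illustrative sketch, not part of this file: rproc_virtio_find_vqs() is
 * reached through the generic virtio API. A virtio driver bound to this vdev
 * (rpmsg being the typical case) would request its virtqueues roughly like
 * this; the callback and array names are hypothetical:
 *
 *	static const char * const my_names[] = { "input", "output" };
 *	vq_callback_t *my_cbs[] = { my_recv_done, my_xmit_done };
 *	struct virtqueue *my_vqs[2];
 *	int err;
 *
 *	err = virtio_find_vqs(vdev, 2, my_vqs, my_cbs, my_names, NULL);
 */
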
 173static u8 rproc_virtio_get_status(struct virtio_device *vdev)
 174{
 175        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
 176        struct fw_rsc_vdev *rsc;
 177
 178        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
 179
 180        return rsc->status;
 181}
 182
 183static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
 184{
 185        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
 186        struct fw_rsc_vdev *rsc;
 187
 188        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
 189
 190        rsc->status = status;
 191        dev_dbg(&vdev->dev, "status: %d\n", status);
 192}
 193
 194static void rproc_virtio_reset(struct virtio_device *vdev)
 195{
 196        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
 197        struct fw_rsc_vdev *rsc;
 198
 199        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
 200
 201        rsc->status = 0;
 202        dev_dbg(&vdev->dev, "reset !\n");
 203}
 204
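/*
 * The status byte manipulated above lives in the shared resource table, so
 * the remote firmware can observe driver state transitions directly. As a
 * hedged illustration (the polling loop and the my_shared_rsc symbol are
 * assumptions about the firmware side), the remote processor would typically
 * wait for VIRTIO_CONFIG_S_DRIVER_OK before touching the rings:
 *
 *	while (!(my_shared_rsc->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
 *		;
 */
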
 205/* provide the vdev features as retrieved from the firmware */
 206static u64 rproc_virtio_get_features(struct virtio_device *vdev)
 207{
 208        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
 209        struct fw_rsc_vdev *rsc;
 210
 211        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
 212
 213        return rsc->dfeatures;
 214}
 215
 216static void rproc_transport_features(struct virtio_device *vdev)
 217{
 218        /*
 219         * Packed ring isn't enabled on remoteproc for now,
 220         * because remoteproc uses vring_new_virtqueue() which
 221         * creates virtio rings on preallocated memory.
 222         */
 223        __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
 224}
 225
 226static int rproc_virtio_finalize_features(struct virtio_device *vdev)
 227{
 228        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
 229        struct fw_rsc_vdev *rsc;
 230
 231        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
 232
 233        /* Give virtio_ring a chance to accept features */
 234        vring_transport_features(vdev);
 235
 236        /* Give virtio_rproc a chance to accept features. */
 237        rproc_transport_features(vdev);
 238
 239        /* Make sure we don't have any features > 32 bits! */
 240        BUG_ON((u32)vdev->features != vdev->features);
 241
 242        /*
 243         * Remember the finalized features of our vdev, and provide it
 244         * to the remote processor once it is powered on.
 245         */
 246        rsc->gfeatures = vdev->features;
 247
 248        return 0;
 249}
 250
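/*
 * To make the dfeatures/gfeatures handshake concrete: the firmware offers its
 * device features in rsc->dfeatures, virtio negotiates them with the driver,
 * and the accepted subset is stored in rsc->gfeatures above. A hedged sketch
 * of what the firmware might do once the driver is ready (field and symbol
 * names on the firmware side are assumptions):
 *
 *	u32 accepted = my_shared_rsc->vdev.gfeatures;
 *
 *	if (accepted & (1U << MY_F_NAME_SERVICE))
 *		my_announce_channels();
 */
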
 251static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
 252                             void *buf, unsigned int len)
 253{
 254        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
 255        struct fw_rsc_vdev *rsc;
 256        void *cfg;
 257
 258        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
 259        cfg = &rsc->vring[rsc->num_of_vrings];
 260
 261        if (offset + len > rsc->config_len || offset + len < len) {
 262                dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
 263                return;
 264        }
 265
 266        memcpy(buf, cfg + offset, len);
 267}
 268
 269static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
 270                             const void *buf, unsigned int len)
 271{
 272        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
 273        struct fw_rsc_vdev *rsc;
 274        void *cfg;
 275
 276        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
 277        cfg = &rsc->vring[rsc->num_of_vrings];
 278
 279        if (offset + len > rsc->config_len || offset + len < len) {
 280                dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
 281                return;
 282        }
 283
 284        memcpy(cfg + offset, buf, len);
 285}
 286
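/*
 * The config space served by rproc_virtio_get()/rproc_virtio_set() sits in
 * the resource table immediately after the vring descriptors, i.e. at
 * &rsc->vring[rsc->num_of_vrings], and is rsc->config_len bytes long.
 * Conceptually (a hedged sketch, ignoring the flexible-array declaration of
 * fw_rsc_vdev), the layout for a vdev with two vrings and an 8-byte device
 * config would be:
 *
 *	struct my_vdev_rsc {
 *		struct fw_rsc_vdev vdev;
 *		struct fw_rsc_vdev_vring vring0;
 *		struct fw_rsc_vdev_vring vring1;
 *		u8 config[8];
 *	};
 */
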
 287static const struct virtio_config_ops rproc_virtio_config_ops = {
 288        .get_features   = rproc_virtio_get_features,
 289        .finalize_features = rproc_virtio_finalize_features,
 290        .find_vqs       = rproc_virtio_find_vqs,
 291        .del_vqs        = rproc_virtio_del_vqs,
 292        .reset          = rproc_virtio_reset,
 293        .set_status     = rproc_virtio_set_status,
 294        .get_status     = rproc_virtio_get_status,
 295        .get            = rproc_virtio_get,
 296        .set            = rproc_virtio_set,
 297};
 298
 299/*
  300 * This function is called whenever the vdev is released, and is responsible
  301 * for decrementing the remote processor's refcount, which was taken when the
  302 * vdev was added.
 303 *
 304 * Never call this function directly; it will be called by the driver
 305 * core when needed.
 306 */
 307static void rproc_virtio_dev_release(struct device *dev)
 308{
 309        struct virtio_device *vdev = dev_to_virtio(dev);
 310        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
 311        struct rproc *rproc = vdev_to_rproc(vdev);
 312
 313        kfree(vdev);
 314
 315        kref_put(&rvdev->refcount, rproc_vdev_release);
 316
 317        put_device(&rproc->dev);
 318}
 319
 320/**
 321 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 322 * @rvdev: the remote vdev
 323 * @id: the device type identification (used to match it with a driver).
 324 *
  325 * This function registers a virtio device. This vdev's parent is
  326 * the rproc device.
 327 *
 328 * Return: 0 on success or an appropriate error value otherwise
 329 */
 330int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
 331{
 332        struct rproc *rproc = rvdev->rproc;
 333        struct device *dev = &rvdev->dev;
 334        struct virtio_device *vdev;
 335        struct rproc_mem_entry *mem;
 336        int ret;
 337
 338        if (rproc->ops->kick == NULL) {
 339                ret = -EINVAL;
 340                dev_err(dev, ".kick method not defined for %s\n", rproc->name);
 341                goto out;
 342        }
 343
 344        /* Try to find dedicated vdev buffer carveout */
 345        mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
 346        if (mem) {
 347                phys_addr_t pa;
 348
 349                if (mem->of_resm_idx != -1) {
 350                        struct device_node *np = rproc->dev.parent->of_node;
 351
 352                        /* Associate reserved memory to vdev device */
 353                        ret = of_reserved_mem_device_init_by_idx(dev, np,
 354                                                                 mem->of_resm_idx);
 355                        if (ret) {
 356                                dev_err(dev, "Can't associate reserved memory\n");
 357                                goto out;
 358                        }
 359                } else {
 360                        if (mem->va) {
 361                                dev_warn(dev, "vdev %d buffer already mapped\n",
 362                                         rvdev->index);
 363                                pa = rproc_va_to_pa(mem->va);
 364                        } else {
  365                                /* Use dma address as the carveout is not mapped yet */
 366                                pa = (phys_addr_t)mem->dma;
 367                        }
 368
 369                        /* Associate vdev buffer memory pool to vdev subdev */
 370                        ret = dma_declare_coherent_memory(dev, pa,
 371                                                           mem->da,
 372                                                           mem->len);
 373                        if (ret < 0) {
 374                                dev_err(dev, "Failed to associate buffer\n");
 375                                goto out;
 376                        }
 377                }
 378        } else {
 379                struct device_node *np = rproc->dev.parent->of_node;
 380
 381                /*
  382                 * If we don't have a dedicated buffer, just attempt to re-assign
 383                 * the reserved memory from our parent. A default memory-region
 384                 * at index 0 from the parent's memory-regions is assigned for
 385                 * the rvdev dev to allocate from. Failure is non-critical and
 386                 * the allocations will fall back to global pools, so don't
 387                 * check return value either.
 388                 */
 389                of_reserved_mem_device_init_by_idx(dev, np, 0);
 390        }
 391
 392        /* Allocate virtio device */
 393        vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 394        if (!vdev) {
 395                ret = -ENOMEM;
 396                goto out;
 397        }
  398        vdev->id.device = id;
  399        vdev->config = &rproc_virtio_config_ops;
 400        vdev->dev.parent = dev;
 401        vdev->dev.release = rproc_virtio_dev_release;
 402
 403        /*
 404         * We're indirectly making a non-temporary copy of the rproc pointer
 405         * here, because drivers probed with this vdev will indirectly
 406         * access the wrapping rproc.
 407         *
 408         * Therefore we must increment the rproc refcount here, and decrement
 409         * it _only_ when the vdev is released.
 410         */
 411        get_device(&rproc->dev);
 412
 413        /* Reference the vdev and vring allocations */
 414        kref_get(&rvdev->refcount);
 415
 416        ret = register_virtio_device(vdev);
 417        if (ret) {
 418                put_device(&vdev->dev);
 419                dev_err(dev, "failed to register vdev: %d\n", ret);
 420                goto out;
 421        }
 422
 423        dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);
 424
 425out:
 426        return ret;
 427}
 428
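/*
 * Hedged note on how rproc_add_virtio_dev() is reached: the remoteproc core
 * treats each vdev as a subdevice, and its subdev start hook calls this
 * function once the resource table has been parsed. A minimal sketch of that
 * call site (based on the core's rproc_vdev_do_start(); treat the exact shape
 * as an assumption):
 *
 *	static int rproc_vdev_do_start(struct rproc_subdev *subdev)
 *	{
 *		struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev,
 *							 subdev);
 *
 *		return rproc_add_virtio_dev(rvdev, rvdev->id);
 *	}
 */
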
 429/**
 430 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 431 * @dev: the virtio device
 432 * @data: must be null
 433 *
 434 * This function unregisters an existing virtio device.
 435 *
 436 * Return: 0
 437 */
 438int rproc_remove_virtio_dev(struct device *dev, void *data)
 439{
 440        struct virtio_device *vdev = dev_to_virtio(dev);
 441
 442        unregister_virtio_device(vdev);
 443        return 0;
 444}
 445
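/*
 * Hedged counterpart for teardown: the core removes the vdev by iterating
 * over the children of the rvdev device, which invokes
 * rproc_remove_virtio_dev() for the registered virtio device (hence the
 * unused @data argument). Treat the exact call site as an assumption:
 *
 *	device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
 */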