linux/drivers/misc/fastrpc.c
   1// SPDX-License-Identifier: GPL-2.0
   2// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
   3// Copyright (c) 2018, Linaro Limited
   4
   5#include <linux/completion.h>
   6#include <linux/device.h>
   7#include <linux/dma-buf.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/idr.h>
  10#include <linux/list.h>
  11#include <linux/miscdevice.h>
  12#include <linux/module.h>
  13#include <linux/of_address.h>
  14#include <linux/of.h>
  15#include <linux/of_platform.h>
  16#include <linux/rpmsg.h>
  17#include <linux/scatterlist.h>
  18#include <linux/slab.h>
  19#include <uapi/misc/fastrpc.h>
  20
  21#define ADSP_DOMAIN_ID (0)
  22#define MDSP_DOMAIN_ID (1)
  23#define SDSP_DOMAIN_ID (2)
  24#define CDSP_DOMAIN_ID (3)
   25#define FASTRPC_DEV_MAX         4 /* adsp, mdsp, sdsp, cdsp */
   26#define FASTRPC_MAX_SESSIONS    9 /* 8 compute, 1 cpz */
  27#define FASTRPC_ALIGN           128
  28#define FASTRPC_MAX_FDLIST      16
  29#define FASTRPC_MAX_CRCLIST     64
  30#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
  31#define FASTRPC_CTX_MAX (256)
  32#define FASTRPC_INIT_HANDLE     1
  33#define FASTRPC_CTXID_MASK (0xFF0)
  34#define INIT_FILELEN_MAX (2 * 1024 * 1024)
  35#define INIT_MEMLEN_MAX  (8 * 1024 * 1024)
  36#define FASTRPC_DEVICE_NAME     "fastrpc"
  37
   38/* Retrieves number of input buffers from the scalars parameter */
  39#define REMOTE_SCALARS_INBUFS(sc)       (((sc) >> 16) & 0x0ff)
  40
   41/* Retrieves number of output buffers from the scalars parameter */
  42#define REMOTE_SCALARS_OUTBUFS(sc)      (((sc) >> 8) & 0x0ff)
  43
   44/* Retrieves number of input handles from the scalars parameter */
  45#define REMOTE_SCALARS_INHANDLES(sc)    (((sc) >> 4) & 0x0f)
  46
   47/* Retrieves number of output handles from the scalars parameter */
  48#define REMOTE_SCALARS_OUTHANDLES(sc)   ((sc) & 0x0f)
  49
  50#define REMOTE_SCALARS_LENGTH(sc)       (REMOTE_SCALARS_INBUFS(sc) +   \
  51                                         REMOTE_SCALARS_OUTBUFS(sc) +  \
  52                                         REMOTE_SCALARS_INHANDLES(sc)+ \
  53                                         REMOTE_SCALARS_OUTHANDLES(sc))
  54#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)  \
  55                                (((attr & 0x07) << 29) |                \
  56                                ((method & 0x1f) << 24) |       \
  57                                ((in & 0xff) << 16) |           \
  58                                ((out & 0xff) <<  8) |          \
  59                                ((oin & 0x0f) <<  4) |          \
  60                                (oout & 0x0f))
  61
  62#define FASTRPC_SCALARS(method, in, out) \
  63                FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
  64
  65#define FASTRPC_CREATE_PROCESS_NARGS    6
  66/* Remote Method id table */
  67#define FASTRPC_RMID_INIT_ATTACH        0
  68#define FASTRPC_RMID_INIT_RELEASE       1
  69#define FASTRPC_RMID_INIT_CREATE        6
  70#define FASTRPC_RMID_INIT_CREATE_ATTR   7
  71#define FASTRPC_RMID_INIT_CREATE_STATIC 8
  72
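/*
 * Worked example of the scalars encoding (for illustration): the process
 * creation call below uses FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0),
 * which expands to FASTRPC_BUILD_SCALARS(0, 6, 4, 0, 0, 0)
 *   = (6 << 24) | (4 << 16) = 0x06040000.
 * REMOTE_SCALARS_INBUFS() then recovers 4, the other three accessors recover
 * 0, and REMOTE_SCALARS_LENGTH() evaluates to 4.
 */
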
  73#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)
  74
  75static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
  76                                                "sdsp", "cdsp"};
  77struct fastrpc_phy_page {
  78        u64 addr;               /* physical address */
  79        u64 size;               /* size of contiguous region */
  80};
  81
  82struct fastrpc_invoke_buf {
  83        u32 num;                /* number of contiguous regions */
  84        u32 pgidx;              /* index to start of contiguous region */
  85};
  86
  87struct fastrpc_remote_arg {
  88        u64 pv;
  89        u64 len;
  90};
  91
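/*
 * This is the packet sent verbatim over the rpmsg endpoint by
 * fastrpc_invoke_send(); @addr and @size point the DSP at the payload
 * buffer assembled in fastrpc_get_args().
 */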
  92struct fastrpc_msg {
  93        int pid;                /* process group id */
  94        int tid;                /* thread id */
  95        u64 ctx;                /* invoke caller context */
  96        u32 handle;     /* handle to invoke */
  97        u32 sc;         /* scalars structure describing the data */
  98        u64 addr;               /* physical address */
  99        u64 size;               /* size of contiguous region */
 100};
 101
 102struct fastrpc_invoke_rsp {
 103        u64 ctx;                /* invoke caller context */
 104        int retval;             /* invoke return value */
 105};
 106
 107struct fastrpc_buf {
 108        struct fastrpc_user *fl;
 109        struct dma_buf *dmabuf;
 110        struct device *dev;
 111        void *virt;
 112        u64 phys;
 113        u64 size;
 114        /* Lock for dma buf attachments */
 115        struct mutex lock;
 116        struct list_head attachments;
 117};
 118
 119struct fastrpc_dma_buf_attachment {
 120        struct device *dev;
 121        struct sg_table sgt;
 122        struct list_head node;
 123};
 124
 125struct fastrpc_map {
 126        struct list_head node;
 127        struct fastrpc_user *fl;
 128        int fd;
 129        struct dma_buf *buf;
 130        struct sg_table *table;
 131        struct dma_buf_attachment *attach;
 132        u64 phys;
 133        u64 size;
 134        void *va;
 135        u64 len;
 136        struct kref refcount;
 137};
 138
 139struct fastrpc_invoke_ctx {
 140        int nscalars;
 141        int nbufs;
 142        int retval;
 143        int pid;
 144        int tgid;
 145        u32 sc;
 146        u32 *crc;
 147        u64 ctxid;
 148        u64 msg_sz;
 149        struct kref refcount;
 150        struct list_head node; /* list of ctxs */
 151        struct completion work;
 152        struct fastrpc_msg msg;
 153        struct fastrpc_user *fl;
 154        struct fastrpc_remote_arg *rpra;
 155        struct fastrpc_map **maps;
 156        struct fastrpc_buf *buf;
 157        struct fastrpc_invoke_args *args;
 158        struct fastrpc_channel_ctx *cctx;
 159};
 160
 161struct fastrpc_session_ctx {
 162        struct device *dev;
 163        int sid;
 164        bool used;
 165        bool valid;
 166};
 167
 168struct fastrpc_channel_ctx {
 169        int domain_id;
 170        int sesscount;
 171        struct rpmsg_device *rpdev;
 172        struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
 173        spinlock_t lock;
 174        struct idr ctx_idr;
 175        struct list_head users;
 176        struct miscdevice miscdev;
 177};
 178
 179struct fastrpc_user {
 180        struct list_head user;
 181        struct list_head maps;
 182        struct list_head pending;
 183
 184        struct fastrpc_channel_ctx *cctx;
 185        struct fastrpc_session_ctx *sctx;
 186        struct fastrpc_buf *init_mem;
 187
 188        int tgid;
 189        int pd;
 190        /* Lock for lists */
 191        spinlock_t lock;
 192        /* lock for allocations */
 193        struct mutex mutex;
 194};
 195
 196static void fastrpc_free_map(struct kref *ref)
 197{
 198        struct fastrpc_map *map;
 199
 200        map = container_of(ref, struct fastrpc_map, refcount);
 201
 202        if (map->table) {
 203                dma_buf_unmap_attachment(map->attach, map->table,
 204                                         DMA_BIDIRECTIONAL);
 205                dma_buf_detach(map->buf, map->attach);
 206                dma_buf_put(map->buf);
 207        }
 208
 209        kfree(map);
 210}
 211
 212static void fastrpc_map_put(struct fastrpc_map *map)
 213{
 214        if (map)
 215                kref_put(&map->refcount, fastrpc_free_map);
 216}
 217
 218static void fastrpc_map_get(struct fastrpc_map *map)
 219{
 220        if (map)
 221                kref_get(&map->refcount);
 222}
 223
 224static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
 225                            struct fastrpc_map **ppmap)
 226{
 227        struct fastrpc_map *map = NULL;
 228
 229        mutex_lock(&fl->mutex);
 230        list_for_each_entry(map, &fl->maps, node) {
 231                if (map->fd == fd) {
 232                        fastrpc_map_get(map);
 233                        *ppmap = map;
 234                        mutex_unlock(&fl->mutex);
 235                        return 0;
 236                }
 237        }
 238        mutex_unlock(&fl->mutex);
 239
 240        return -ENOENT;
 241}
 242
 243static void fastrpc_buf_free(struct fastrpc_buf *buf)
 244{
 245        dma_free_coherent(buf->dev, buf->size, buf->virt,
 246                          FASTRPC_PHYS(buf->phys));
 247        kfree(buf);
 248}
 249
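/*
 * Buffers handed to the DSP encode the session id (SMMU context bank) in the
 * upper 32 bits of their "physical" address; FASTRPC_PHYS() strips it again
 * whenever the address is handed back to the local DMA API.
 */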
 250static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
 251                             u64 size, struct fastrpc_buf **obuf)
 252{
 253        struct fastrpc_buf *buf;
 254
 255        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 256        if (!buf)
 257                return -ENOMEM;
 258
 259        INIT_LIST_HEAD(&buf->attachments);
 260        mutex_init(&buf->lock);
 261
 262        buf->fl = fl;
 263        buf->virt = NULL;
 264        buf->phys = 0;
 265        buf->size = size;
 266        buf->dev = dev;
 267
 268        buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
 269                                       GFP_KERNEL);
        if (!buf->virt) {
                mutex_destroy(&buf->lock);
                kfree(buf);
                return -ENOMEM;
        }
 272
 273        if (fl->sctx && fl->sctx->sid)
 274                buf->phys += ((u64)fl->sctx->sid << 32);
 275
 276        *obuf = buf;
 277
 278        return 0;
 279}
 280
 281static void fastrpc_context_free(struct kref *ref)
 282{
 283        struct fastrpc_invoke_ctx *ctx;
 284        struct fastrpc_channel_ctx *cctx;
 285        int i;
 286
 287        ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
 288        cctx = ctx->cctx;
 289
 290        for (i = 0; i < ctx->nscalars; i++)
 291                fastrpc_map_put(ctx->maps[i]);
 292
 293        if (ctx->buf)
 294                fastrpc_buf_free(ctx->buf);
 295
 296        spin_lock(&cctx->lock);
 297        idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
 298        spin_unlock(&cctx->lock);
 299
 300        kfree(ctx->maps);
 301        kfree(ctx);
 302}
 303
 304static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
 305{
 306        kref_get(&ctx->refcount);
 307}
 308
 309static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
 310{
 311        kref_put(&ctx->refcount, fastrpc_context_free);
 312}
 313
 314static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
 315                        struct fastrpc_user *user, u32 kernel, u32 sc,
 316                        struct fastrpc_invoke_args *args)
 317{
 318        struct fastrpc_channel_ctx *cctx = user->cctx;
 319        struct fastrpc_invoke_ctx *ctx = NULL;
 320        int ret;
 321
 322        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 323        if (!ctx)
 324                return ERR_PTR(-ENOMEM);
 325
 326        INIT_LIST_HEAD(&ctx->node);
 327        ctx->fl = user;
 328        ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
 329        ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
 330                     REMOTE_SCALARS_OUTBUFS(sc);
 331
 332        if (ctx->nscalars) {
 333                ctx->maps = kcalloc(ctx->nscalars,
 334                                    sizeof(*ctx->maps), GFP_KERNEL);
 335                if (!ctx->maps) {
 336                        kfree(ctx);
 337                        return ERR_PTR(-ENOMEM);
 338                }
 339                ctx->args = args;
 340        }
 341
 342        ctx->sc = sc;
 343        ctx->retval = -1;
 344        ctx->pid = current->pid;
 345        ctx->tgid = user->tgid;
 346        ctx->cctx = cctx;
 347        init_completion(&ctx->work);
 348
 349        spin_lock(&user->lock);
 350        list_add_tail(&ctx->node, &user->pending);
 351        spin_unlock(&user->lock);
 352
 353        spin_lock(&cctx->lock);
 354        ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
 355                               FASTRPC_CTX_MAX, GFP_ATOMIC);
 356        if (ret < 0) {
 357                spin_unlock(&cctx->lock);
 358                goto err_idr;
 359        }
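        /*
         * Keep the IDR id in bits [11:4] of ctxid so the rpmsg callback can
         * recover it with FASTRPC_CTXID_MASK; the low bits stay free for the
         * PD id that fastrpc_invoke_send() ORs into msg->ctx.
         */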
 360        ctx->ctxid = ret << 4;
 361        spin_unlock(&cctx->lock);
 362
 363        kref_init(&ctx->refcount);
 364
 365        return ctx;
 366err_idr:
 367        spin_lock(&user->lock);
 368        list_del(&ctx->node);
 369        spin_unlock(&user->lock);
 370        kfree(ctx->maps);
 371        kfree(ctx);
 372
 373        return ERR_PTR(ret);
 374}
 375
 376static struct sg_table *
 377fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
 378                    enum dma_data_direction dir)
 379{
 380        struct fastrpc_dma_buf_attachment *a = attachment->priv;
 381        struct sg_table *table;
 382
 383        table = &a->sgt;
 384
 385        if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
 386                return ERR_PTR(-ENOMEM);
 387
 388        return table;
 389}
 390
 391static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
 392                                  struct sg_table *table,
 393                                  enum dma_data_direction dir)
 394{
 395        dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
 396}
 397
 398static void fastrpc_release(struct dma_buf *dmabuf)
 399{
 400        struct fastrpc_buf *buffer = dmabuf->priv;
 401
 402        fastrpc_buf_free(buffer);
 403}
 404
 405static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
 406                                  struct dma_buf_attachment *attachment)
 407{
 408        struct fastrpc_dma_buf_attachment *a;
 409        struct fastrpc_buf *buffer = dmabuf->priv;
 410        int ret;
 411
 412        a = kzalloc(sizeof(*a), GFP_KERNEL);
 413        if (!a)
 414                return -ENOMEM;
 415
 416        ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
 417                              FASTRPC_PHYS(buffer->phys), buffer->size);
        if (ret < 0) {
                dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
                kfree(a);
                return -EINVAL;
        }
 422
 423        a->dev = attachment->dev;
 424        INIT_LIST_HEAD(&a->node);
 425        attachment->priv = a;
 426
 427        mutex_lock(&buffer->lock);
 428        list_add(&a->node, &buffer->attachments);
 429        mutex_unlock(&buffer->lock);
 430
 431        return 0;
 432}
 433
 434static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
 435                                    struct dma_buf_attachment *attachment)
 436{
 437        struct fastrpc_dma_buf_attachment *a = attachment->priv;
 438        struct fastrpc_buf *buffer = dmabuf->priv;
 439
 440        mutex_lock(&buffer->lock);
 441        list_del(&a->node);
 442        mutex_unlock(&buffer->lock);
 443        kfree(a);
 444}
 445
 446static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
 447{
 448        struct fastrpc_buf *buf = dmabuf->priv;
 449
 450        return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
 451}
 452
 453static void *fastrpc_vmap(struct dma_buf *dmabuf)
 454{
 455        struct fastrpc_buf *buf = dmabuf->priv;
 456
 457        return buf->virt;
 458}
 459
 460static int fastrpc_mmap(struct dma_buf *dmabuf,
 461                        struct vm_area_struct *vma)
 462{
 463        struct fastrpc_buf *buf = dmabuf->priv;
 464        size_t size = vma->vm_end - vma->vm_start;
 465
 466        return dma_mmap_coherent(buf->dev, vma, buf->virt,
 467                                 FASTRPC_PHYS(buf->phys), size);
 468}
 469
 470static const struct dma_buf_ops fastrpc_dma_buf_ops = {
 471        .attach = fastrpc_dma_buf_attach,
 472        .detach = fastrpc_dma_buf_detatch,
 473        .map_dma_buf = fastrpc_map_dma_buf,
 474        .unmap_dma_buf = fastrpc_unmap_dma_buf,
 475        .mmap = fastrpc_mmap,
 476        .map = fastrpc_kmap,
 477        .vmap = fastrpc_vmap,
 478        .release = fastrpc_release,
 479};
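
/*
 * These exporter ops back the buffers created by FASTRPC_IOCTL_ALLOC_DMA_BUFF;
 * the resulting fd can then be passed back to the driver as an argument fd,
 * where fastrpc_map_create() re-imports it via dma_buf_get()/dma_buf_attach().
 */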
 480
 481static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
 482                              u64 len, struct fastrpc_map **ppmap)
 483{
 484        struct fastrpc_session_ctx *sess = fl->sctx;
 485        struct fastrpc_map *map = NULL;
 486        int err = 0;
 487
 488        if (!fastrpc_map_find(fl, fd, ppmap))
 489                return 0;
 490
 491        map = kzalloc(sizeof(*map), GFP_KERNEL);
 492        if (!map)
 493                return -ENOMEM;
 494
 495        INIT_LIST_HEAD(&map->node);
 496        map->fl = fl;
 497        map->fd = fd;
 498        map->buf = dma_buf_get(fd);
 499        if (IS_ERR(map->buf)) {
 500                err = PTR_ERR(map->buf);
 501                goto get_err;
 502        }
 503
 504        map->attach = dma_buf_attach(map->buf, sess->dev);
 505        if (IS_ERR(map->attach)) {
 506                dev_err(sess->dev, "Failed to attach dmabuf\n");
 507                err = PTR_ERR(map->attach);
 508                goto attach_err;
 509        }
 510
 511        map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
 512        if (IS_ERR(map->table)) {
 513                err = PTR_ERR(map->table);
 514                goto map_err;
 515        }
 516
 517        map->phys = sg_dma_address(map->table->sgl);
 518        map->phys += ((u64)fl->sctx->sid << 32);
 519        map->size = len;
 520        map->va = sg_virt(map->table->sgl);
 521        map->len = len;
 522        kref_init(&map->refcount);
 523
 524        spin_lock(&fl->lock);
 525        list_add_tail(&map->node, &fl->maps);
 526        spin_unlock(&fl->lock);
 527        *ppmap = map;
 528
 529        return 0;
 530
 531map_err:
 532        dma_buf_detach(map->buf, map->attach);
 533attach_err:
 534        dma_buf_put(map->buf);
 535get_err:
 536        kfree(map);
 537
 538        return err;
 539}
 540
 541/*
 542 * Fastrpc payload buffer with metadata looks like:
 543 *
 544 * >>>>>>  START of METADATA <<<<<<<<<
 545 * +---------------------------------+
 546 * |           Arguments             |
 547 * | type:(struct fastrpc_remote_arg)|
 548 * |             (0 - N)             |
 549 * +---------------------------------+
 550 * |         Invoke Buffer list      |
 551 * | type:(struct fastrpc_invoke_buf)|
 552 * |           (0 - N)               |
 553 * +---------------------------------+
 554 * |         Page info list          |
 555 * | type:(struct fastrpc_phy_page)  |
 556 * |             (0 - N)             |
 557 * +---------------------------------+
 558 * |         Optional info           |
 559 * |(can be specific to SoC/Firmware)|
 560 * +---------------------------------+
 561 * >>>>>>>>  END of METADATA <<<<<<<<<
 562 * +---------------------------------+
 563 * |         Inline ARGS             |
 564 * |            (0-N)                |
 565 * +---------------------------------+
 566 */
 567
 568static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
 569{
 570        int size = 0;
 571
 572        size = (sizeof(struct fastrpc_remote_arg) +
 573                sizeof(struct fastrpc_invoke_buf) +
 574                sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
 575                sizeof(u64) * FASTRPC_MAX_FDLIST +
 576                sizeof(u32) * FASTRPC_MAX_CRCLIST;
 577
 578        return size;
 579}
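
/*
 * Example of the metadata sizing above (illustration only): with two scalar
 * arguments the metadata occupies 2 * (16 + 8 + 16) bytes of remote args,
 * invoke-buffer entries and page entries, plus 16 * 8 bytes of fd list and
 * 64 * 4 bytes of CRC list, i.e. 80 + 128 + 256 = 464 bytes ahead of the
 * inline payload.
 */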
 580
 581static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
 582{
 583        u64 size = 0;
 584        int i;
 585
 586        size = ALIGN(metalen, FASTRPC_ALIGN);
 587        for (i = 0; i < ctx->nscalars; i++) {
 588                if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
 589                        size = ALIGN(size, FASTRPC_ALIGN);
 590                        size += ctx->args[i].length;
 591                }
 592        }
 593
 594        return size;
 595}
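
/*
 * Only arguments passed by pointer (fd of 0 or -1) are copied into the inline
 * area sized above; fd-backed arguments are mapped through their dma-buf in
 * fastrpc_create_maps() and referenced by physical address alone.
 */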
 596
 597static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
 598{
 599        struct device *dev = ctx->fl->sctx->dev;
 600        int i, err;
 601
 602        for (i = 0; i < ctx->nscalars; ++i) {
 603                /* Make sure reserved field is set to 0 */
 604                if (ctx->args[i].reserved)
 605                        return -EINVAL;
 606
 607                if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
 608                    ctx->args[i].length == 0)
 609                        continue;
 610
 611                err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
 612                                         ctx->args[i].length, &ctx->maps[i]);
 613                if (err) {
 614                        dev_err(dev, "Error Creating map %d\n", err);
 615                        return -EINVAL;
 616                }
 617
 618        }
 619        return 0;
 620}
 621
 622static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
 623{
 624        struct device *dev = ctx->fl->sctx->dev;
 625        struct fastrpc_remote_arg *rpra;
 626        struct fastrpc_invoke_buf *list;
 627        struct fastrpc_phy_page *pages;
 628        int inbufs, i, err = 0;
 629        u64 rlen, pkt_size;
 630        uintptr_t args;
 631        int metalen;
  632
 634        inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
 635        metalen = fastrpc_get_meta_size(ctx);
 636        pkt_size = fastrpc_get_payload_size(ctx, metalen);
 637
 638        err = fastrpc_create_maps(ctx);
 639        if (err)
 640                return err;
 641
 642        ctx->msg_sz = pkt_size;
 643
 644        err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
 645        if (err)
 646                return err;
 647
 648        rpra = ctx->buf->virt;
 649        list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
 650        pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
 651                sizeof(*rpra));
 652        args = (uintptr_t)ctx->buf->virt + metalen;
 653        rlen = pkt_size - metalen;
 654        ctx->rpra = rpra;
 655
 656        for (i = 0; i < ctx->nbufs; ++i) {
 657                u64 len = ctx->args[i].length;
 658
 659                rpra[i].pv = 0;
 660                rpra[i].len = len;
 661                list[i].num = len ? 1 : 0;
 662                list[i].pgidx = i;
 663
 664                if (!len)
 665                        continue;
 666
 667                pages[i].size = roundup(len, PAGE_SIZE);
 668
 669                if (ctx->maps[i]) {
 670                        rpra[i].pv = (u64) ctx->args[i].ptr;
 671                        pages[i].addr = ctx->maps[i]->phys;
 672                } else {
 673                        rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
 674                        args = ALIGN(args, FASTRPC_ALIGN);
 675                        if (rlen < len)
 676                                goto bail;
 677
 678                        rpra[i].pv = args;
 679                        pages[i].addr = ctx->buf->phys + (pkt_size - rlen);
 680                        pages[i].addr = pages[i].addr & PAGE_MASK;
 681                        args = args + len;
 682                        rlen -= len;
 683                }
 684
 685                if (i < inbufs && !ctx->maps[i]) {
 686                        void *dst = (void *)(uintptr_t)rpra[i].pv;
 687                        void *src = (void *)(uintptr_t)ctx->args[i].ptr;
 688
 689                        if (!kernel) {
 690                                if (copy_from_user(dst, (void __user *)src,
 691                                                   len)) {
 692                                        err = -EFAULT;
 693                                        goto bail;
 694                                }
 695                        } else {
 696                                memcpy(dst, src, len);
 697                        }
 698                }
 699        }
 700
 701        for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
 702                rpra[i].pv = (u64) ctx->args[i].ptr;
 703                rpra[i].len = ctx->args[i].length;
 704                list[i].num = ctx->args[i].length ? 1 : 0;
 705                list[i].pgidx = i;
 706                pages[i].addr = ctx->maps[i]->phys;
 707                pages[i].size = ctx->maps[i]->size;
 708        }
 709
 710bail:
 711        if (err)
 712                dev_err(dev, "Error: get invoke args failed:%d\n", err);
 713
 714        return err;
 715}
 716
 717static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
 718                            u32 kernel)
 719{
 720        struct fastrpc_remote_arg *rpra = ctx->rpra;
 721        int i, inbufs;
 722
 723        inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
 724
 725        for (i = inbufs; i < ctx->nbufs; ++i) {
 726                void *src = (void *)(uintptr_t)rpra[i].pv;
 727                void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
 728                u64 len = rpra[i].len;
 729
 730                if (!kernel) {
 731                        if (copy_to_user((void __user *)dst, src, len))
 732                                return -EFAULT;
 733                } else {
 734                        memcpy(dst, src, len);
 735                }
 736        }
 737
 738        return 0;
 739}
 740
 741static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
 742                               struct fastrpc_invoke_ctx *ctx,
 743                               u32 kernel, uint32_t handle)
 744{
 745        struct fastrpc_channel_ctx *cctx;
 746        struct fastrpc_user *fl = ctx->fl;
 747        struct fastrpc_msg *msg = &ctx->msg;
 748
 749        cctx = fl->cctx;
 750        msg->pid = fl->tgid;
 751        msg->tid = current->pid;
 752
 753        if (kernel)
 754                msg->pid = 0;
 755
 756        msg->ctx = ctx->ctxid | fl->pd;
 757        msg->handle = handle;
 758        msg->sc = ctx->sc;
 759        msg->addr = ctx->buf ? ctx->buf->phys : 0;
 760        msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
 761        fastrpc_context_get(ctx);
 762
 763        return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
 764}
 765
 766static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
 767                                   u32 handle, u32 sc,
 768                                   struct fastrpc_invoke_args *args)
 769{
 770        struct fastrpc_invoke_ctx *ctx = NULL;
 771        int err = 0;
 772
 773        if (!fl->sctx)
 774                return -EINVAL;
 775
 776        ctx = fastrpc_context_alloc(fl, kernel, sc, args);
 777        if (IS_ERR(ctx))
 778                return PTR_ERR(ctx);
 779
 780        if (ctx->nscalars) {
 781                err = fastrpc_get_args(kernel, ctx);
 782                if (err)
 783                        goto bail;
 784        }
 785        /* Send invoke buffer to remote dsp */
 786        err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
 787        if (err)
 788                goto bail;
 789
 790        /* Wait for remote dsp to respond or time out */
 791        err = wait_for_completion_interruptible(&ctx->work);
 792        if (err)
 793                goto bail;
 794
 795        /* Check the response from remote dsp */
 796        err = ctx->retval;
 797        if (err)
 798                goto bail;
 799
 800        if (ctx->nscalars) {
 801                /* populate all the output buffers with results */
 802                err = fastrpc_put_args(ctx, kernel);
 803                if (err)
 804                        goto bail;
 805        }
 806
 807bail:
 808        /* We are done with this compute context, remove it from pending list */
 809        spin_lock(&fl->lock);
 810        list_del(&ctx->node);
 811        spin_unlock(&fl->lock);
 812        fastrpc_context_put(ctx);
 813
 814        if (err)
 815                dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
 816
 817        return err;
 818}
 819
 820static int fastrpc_init_create_process(struct fastrpc_user *fl,
 821                                        char __user *argp)
 822{
 823        struct fastrpc_init_create init;
 824        struct fastrpc_invoke_args *args;
 825        struct fastrpc_phy_page pages[1];
 826        struct fastrpc_map *map = NULL;
 827        struct fastrpc_buf *imem = NULL;
 828        int memlen;
 829        int err;
 830        struct {
 831                int pgid;
 832                u32 namelen;
 833                u32 filelen;
 834                u32 pageslen;
 835                u32 attrs;
 836                u32 siglen;
 837        } inbuf;
 838        u32 sc;
 839
 840        args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
 841        if (!args)
 842                return -ENOMEM;
 843
 844        if (copy_from_user(&init, argp, sizeof(init))) {
 845                err = -EFAULT;
 846                goto bail;
 847        }
 848
 849        if (init.filelen > INIT_FILELEN_MAX) {
 850                err = -EINVAL;
 851                goto bail;
 852        }
 853
 854        inbuf.pgid = fl->tgid;
 855        inbuf.namelen = strlen(current->comm) + 1;
 856        inbuf.filelen = init.filelen;
 857        inbuf.pageslen = 1;
 858        inbuf.attrs = init.attrs;
 859        inbuf.siglen = init.siglen;
 860        fl->pd = 1;
 861
 862        if (init.filelen && init.filefd) {
 863                err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
 864                if (err)
 865                        goto bail;
 866        }
 867
 868        memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
 869                       1024 * 1024);
 870        err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
 871                                &imem);
 872        if (err) {
 873                fastrpc_map_put(map);
 874                goto bail;
 875        }
 876
 877        fl->init_mem = imem;
 878        args[0].ptr = (u64)(uintptr_t)&inbuf;
 879        args[0].length = sizeof(inbuf);
 880        args[0].fd = -1;
 881
 882        args[1].ptr = (u64)(uintptr_t)current->comm;
 883        args[1].length = inbuf.namelen;
 884        args[1].fd = -1;
 885
 886        args[2].ptr = (u64) init.file;
 887        args[2].length = inbuf.filelen;
 888        args[2].fd = init.filefd;
 889
 890        pages[0].addr = imem->phys;
 891        pages[0].size = imem->size;
 892
 893        args[3].ptr = (u64)(uintptr_t) pages;
 894        args[3].length = 1 * sizeof(*pages);
 895        args[3].fd = -1;
 896
 897        args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
 898        args[4].length = sizeof(inbuf.attrs);
 899        args[4].fd = -1;
 900
 901        args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
 902        args[5].length = sizeof(inbuf.siglen);
 903        args[5].fd = -1;
 904
 905        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
 906        if (init.attrs)
 907                sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
 908
 909        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
 910                                      sc, args);
 911
 912        if (err) {
 913                fastrpc_map_put(map);
 914                fastrpc_buf_free(imem);
 915        }
 916
 917bail:
 918        kfree(args);
 919
 920        return err;
 921}
 922
 923static struct fastrpc_session_ctx *fastrpc_session_alloc(
 924                                        struct fastrpc_channel_ctx *cctx)
 925{
 926        struct fastrpc_session_ctx *session = NULL;
 927        int i;
 928
 929        spin_lock(&cctx->lock);
 930        for (i = 0; i < cctx->sesscount; i++) {
 931                if (!cctx->session[i].used && cctx->session[i].valid) {
 932                        cctx->session[i].used = true;
 933                        session = &cctx->session[i];
 934                        break;
 935                }
 936        }
 937        spin_unlock(&cctx->lock);
 938
 939        return session;
 940}
 941
 942static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
 943                                 struct fastrpc_session_ctx *session)
 944{
 945        spin_lock(&cctx->lock);
 946        session->used = false;
 947        spin_unlock(&cctx->lock);
 948}
 949
 950static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
 951{
 952        struct fastrpc_invoke_args args[1];
 953        int tgid = 0;
 954        u32 sc;
 955
 956        tgid = fl->tgid;
 957        args[0].ptr = (u64)(uintptr_t) &tgid;
 958        args[0].length = sizeof(tgid);
 959        args[0].fd = -1;
 960        args[0].reserved = 0;
 961        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
 962
 963        return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
 964                                       sc, &args[0]);
 965}
 966
 967static int fastrpc_device_release(struct inode *inode, struct file *file)
 968{
 969        struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
 970        struct fastrpc_channel_ctx *cctx = fl->cctx;
 971        struct fastrpc_invoke_ctx *ctx, *n;
 972        struct fastrpc_map *map, *m;
 973
 974        fastrpc_release_current_dsp_process(fl);
 975
 976        spin_lock(&cctx->lock);
 977        list_del(&fl->user);
 978        spin_unlock(&cctx->lock);
 979
 980        if (fl->init_mem)
 981                fastrpc_buf_free(fl->init_mem);
 982
 983        list_for_each_entry_safe(ctx, n, &fl->pending, node) {
 984                list_del(&ctx->node);
 985                fastrpc_context_put(ctx);
 986        }
 987
 988        list_for_each_entry_safe(map, m, &fl->maps, node) {
 989                list_del(&map->node);
 990                fastrpc_map_put(map);
 991        }
 992
 993        fastrpc_session_free(cctx, fl->sctx);
 994
 995        mutex_destroy(&fl->mutex);
 996        kfree(fl);
 997        file->private_data = NULL;
 998
 999        return 0;
1000}
1001
1002static int fastrpc_device_open(struct inode *inode, struct file *filp)
1003{
1004        struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
1005        struct fastrpc_user *fl = NULL;
1006
1007        fl = kzalloc(sizeof(*fl), GFP_KERNEL);
1008        if (!fl)
1009                return -ENOMEM;
1010
1011        filp->private_data = fl;
1012        spin_lock_init(&fl->lock);
1013        mutex_init(&fl->mutex);
1014        INIT_LIST_HEAD(&fl->pending);
1015        INIT_LIST_HEAD(&fl->maps);
1016        INIT_LIST_HEAD(&fl->user);
1017        fl->tgid = current->tgid;
1018        fl->cctx = cctx;
1019
1020        fl->sctx = fastrpc_session_alloc(cctx);
1021        if (!fl->sctx) {
1022                dev_err(&cctx->rpdev->dev, "No session available\n");
1023                mutex_destroy(&fl->mutex);
1024                kfree(fl);
1025
1026                return -EBUSY;
1027        }
1028
1029        spin_lock(&cctx->lock);
1030        list_add_tail(&fl->user, &cctx->users);
1031        spin_unlock(&cctx->lock);
1032
1033        return 0;
1034}
1035
1036static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
1037{
1038        struct dma_buf *buf;
1039        int info;
1040
1041        if (copy_from_user(&info, argp, sizeof(info)))
1042                return -EFAULT;
1043
1044        buf = dma_buf_get(info);
1045        if (IS_ERR_OR_NULL(buf))
1046                return -EINVAL;
1047        /*
 1048         * one for the last get and the other for the ALLOC_DMA_BUFF ioctl
1049         */
1050        dma_buf_put(buf);
1051        dma_buf_put(buf);
1052
1053        return 0;
1054}
1055
1056static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
1057{
1058        struct fastrpc_alloc_dma_buf bp;
1059        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1060        struct fastrpc_buf *buf = NULL;
1061        int err;
1062
1063        if (copy_from_user(&bp, argp, sizeof(bp)))
1064                return -EFAULT;
1065
1066        err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
1067        if (err)
1068                return err;
1069        exp_info.ops = &fastrpc_dma_buf_ops;
1070        exp_info.size = bp.size;
1071        exp_info.flags = O_RDWR;
1072        exp_info.priv = buf;
1073        buf->dmabuf = dma_buf_export(&exp_info);
1074        if (IS_ERR(buf->dmabuf)) {
1075                err = PTR_ERR(buf->dmabuf);
1076                fastrpc_buf_free(buf);
1077                return err;
1078        }
1079
1080        bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
1081        if (bp.fd < 0) {
1082                dma_buf_put(buf->dmabuf);
1083                return -EINVAL;
1084        }
1085
1086        if (copy_to_user(argp, &bp, sizeof(bp))) {
1087                dma_buf_put(buf->dmabuf);
1088                return -EFAULT;
1089        }
1090
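        /*
         * Hold an extra reference so the export outlives this ioctl; it is
         * dropped again by FASTRPC_IOCTL_FREE_DMA_BUFF (see
         * fastrpc_dmabuf_free()).
         */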
1091        get_dma_buf(buf->dmabuf);
1092
1093        return 0;
1094}
1095
1096static int fastrpc_init_attach(struct fastrpc_user *fl)
1097{
1098        struct fastrpc_invoke_args args[1];
1099        int tgid = fl->tgid;
1100        u32 sc;
1101
1102        args[0].ptr = (u64)(uintptr_t) &tgid;
1103        args[0].length = sizeof(tgid);
1104        args[0].fd = -1;
1105        args[0].reserved = 0;
1106        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
1107        fl->pd = 0;
1108
1109        return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1110                                       sc, &args[0]);
1111}
1112
1113static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
1114{
1115        struct fastrpc_invoke_args *args = NULL;
1116        struct fastrpc_invoke inv;
1117        u32 nscalars;
1118        int err;
1119
1120        if (copy_from_user(&inv, argp, sizeof(inv)))
1121                return -EFAULT;
1122
1123        /* nscalars is truncated here to max supported value */
1124        nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
1125        if (nscalars) {
1126                args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
1127                if (!args)
1128                        return -ENOMEM;
1129
1130                if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
1131                                   nscalars * sizeof(*args))) {
1132                        kfree(args);
1133                        return -EFAULT;
1134                }
1135        }
1136
1137        err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
1138        kfree(args);
1139
1140        return err;
1141}
1142
1143static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
1144                                 unsigned long arg)
1145{
1146        struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
1147        char __user *argp = (char __user *)arg;
1148        int err;
1149
1150        switch (cmd) {
1151        case FASTRPC_IOCTL_INVOKE:
1152                err = fastrpc_invoke(fl, argp);
1153                break;
1154        case FASTRPC_IOCTL_INIT_ATTACH:
1155                err = fastrpc_init_attach(fl);
1156                break;
1157        case FASTRPC_IOCTL_INIT_CREATE:
1158                err = fastrpc_init_create_process(fl, argp);
1159                break;
1160        case FASTRPC_IOCTL_FREE_DMA_BUFF:
1161                err = fastrpc_dmabuf_free(fl, argp);
1162                break;
1163        case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
1164                err = fastrpc_dmabuf_alloc(fl, argp);
1165                break;
1166        default:
1167                err = -ENOTTY;
1168                break;
1169        }
1170
1171        return err;
1172}
1173
1174static const struct file_operations fastrpc_fops = {
1175        .open = fastrpc_device_open,
1176        .release = fastrpc_device_release,
1177        .unlocked_ioctl = fastrpc_device_ioctl,
1178        .compat_ioctl = fastrpc_device_ioctl,
1179};
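
/*
 * Minimal userspace sketch (editor's illustration, not part of the driver) of
 * driving an invocation through the misc device registered below; "handle",
 * "method" and "buf" are placeholders for the remote interface being called:
 *
 *     int fd = open("/dev/fastrpc-adsp", O_RDWR);
 *     struct fastrpc_invoke_args arg = {
 *             .ptr = (__u64)(uintptr_t)buf,
 *             .length = sizeof(buf),
 *             .fd = -1,        // plain pointer argument, no dma-buf
 *     };                       // .reserved stays 0, as the driver requires
 *     struct fastrpc_invoke inv = {
 *             .handle = handle,
 *             .sc = (method << 24) | (1 << 16), // one inbuf, cf. FASTRPC_BUILD_SCALARS()
 *             .args = (__u64)(uintptr_t)&arg,
 *     };
 *     ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
 */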
1180
1181static int fastrpc_cb_probe(struct platform_device *pdev)
1182{
1183        struct fastrpc_channel_ctx *cctx;
1184        struct fastrpc_session_ctx *sess;
1185        struct device *dev = &pdev->dev;
1186        int i, sessions = 0;
1187        int rc;
1188
1189        cctx = dev_get_drvdata(dev->parent);
1190        if (!cctx)
1191                return -EINVAL;
1192
1193        of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
1194
1195        spin_lock(&cctx->lock);
1196        sess = &cctx->session[cctx->sesscount];
1197        sess->used = false;
1198        sess->valid = true;
1199        sess->dev = dev;
1200        dev_set_drvdata(dev, sess);
1201
1202        if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
1203                dev_info(dev, "FastRPC Session ID not specified in DT\n");
1204
1205        if (sessions > 0) {
1206                struct fastrpc_session_ctx *dup_sess;
1207
1208                for (i = 1; i < sessions; i++) {
1209                        if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
1210                                break;
1211                        dup_sess = &cctx->session[cctx->sesscount];
1212                        memcpy(dup_sess, sess, sizeof(*dup_sess));
1213                }
1214        }
1215        cctx->sesscount++;
1216        spin_unlock(&cctx->lock);
1217        rc = dma_set_mask(dev, DMA_BIT_MASK(32));
1218        if (rc) {
1219                dev_err(dev, "32-bit DMA enable failed\n");
1220                return rc;
1221        }
1222
1223        return 0;
1224}
1225
1226static int fastrpc_cb_remove(struct platform_device *pdev)
1227{
1228        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
1229        struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
1230        int i;
1231
1232        spin_lock(&cctx->lock);
1233        for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
1234                if (cctx->session[i].sid == sess->sid) {
1235                        cctx->session[i].valid = false;
1236                        cctx->sesscount--;
1237                }
1238        }
1239        spin_unlock(&cctx->lock);
1240
1241        return 0;
1242}
1243
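/*
 * The device tree layout assumed by this pair of drivers (a sketch for
 * orientation; the qcom,fastrpc binding document is authoritative): a
 * "qcom,fastrpc" node on the rpmsg edge carrying a "label" of adsp, mdsp,
 * sdsp or cdsp, with one "qcom,fastrpc-compute-cb" child per context bank
 * whose "reg" supplies the session id and which may set "qcom,nsessions".
 */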
1244static const struct of_device_id fastrpc_match_table[] = {
1245        { .compatible = "qcom,fastrpc-compute-cb", },
1246        {}
1247};
1248
1249static struct platform_driver fastrpc_cb_driver = {
1250        .probe = fastrpc_cb_probe,
1251        .remove = fastrpc_cb_remove,
1252        .driver = {
1253                .name = "qcom,fastrpc-cb",
1254                .of_match_table = fastrpc_match_table,
1255                .suppress_bind_attrs = true,
1256        },
1257};
1258
1259static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
1260{
1261        struct device *rdev = &rpdev->dev;
1262        struct fastrpc_channel_ctx *data;
1263        int i, err, domain_id = -1;
1264        const char *domain;
1265
1266        data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
1267        if (!data)
1268                return -ENOMEM;
1269
1270        err = of_property_read_string(rdev->of_node, "label", &domain);
1271        if (err) {
1272                dev_info(rdev, "FastRPC Domain not specified in DT\n");
1273                return err;
1274        }
1275
1276        for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
1277                if (!strcmp(domains[i], domain)) {
1278                        domain_id = i;
1279                        break;
1280                }
1281        }
1282
1283        if (domain_id < 0) {
1284                dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
1285                return -EINVAL;
1286        }
1287
1288        data->miscdev.minor = MISC_DYNAMIC_MINOR;
1289        data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
1290                                domains[domain_id]);
1291        data->miscdev.fops = &fastrpc_fops;
1292        err = misc_register(&data->miscdev);
1293        if (err)
1294                return err;
1295
1296        dev_set_drvdata(&rpdev->dev, data);
1297        dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
1298        INIT_LIST_HEAD(&data->users);
1299        spin_lock_init(&data->lock);
1300        idr_init(&data->ctx_idr);
1301        data->domain_id = domain_id;
1302        data->rpdev = rpdev;
1303
1304        return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
1305}
1306
1307static void fastrpc_notify_users(struct fastrpc_user *user)
1308{
1309        struct fastrpc_invoke_ctx *ctx;
1310
1311        spin_lock(&user->lock);
1312        list_for_each_entry(ctx, &user->pending, node)
1313                complete(&ctx->work);
1314        spin_unlock(&user->lock);
1315}
1316
1317static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
1318{
1319        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
1320        struct fastrpc_user *user;
1321
1322        spin_lock(&cctx->lock);
1323        list_for_each_entry(user, &cctx->users, user)
1324                fastrpc_notify_users(user);
1325        spin_unlock(&cctx->lock);
1326
1327        misc_deregister(&cctx->miscdev);
1328        of_platform_depopulate(&rpdev->dev);
1330}
1331
1332static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
1333                                  int len, void *priv, u32 addr)
1334{
1335        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
1336        struct fastrpc_invoke_rsp *rsp = data;
1337        struct fastrpc_invoke_ctx *ctx;
1338        unsigned long flags;
1339        unsigned long ctxid;
1340
1341        if (len < sizeof(*rsp))
1342                return -EINVAL;
1343
1344        ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
1345
1346        spin_lock_irqsave(&cctx->lock, flags);
1347        ctx = idr_find(&cctx->ctx_idr, ctxid);
1348        spin_unlock_irqrestore(&cctx->lock, flags);
1349
1350        if (!ctx) {
1351                dev_err(&rpdev->dev, "No context ID matches response\n");
1352                return -ENOENT;
1353        }
1354
1355        ctx->retval = rsp->retval;
1356        complete(&ctx->work);
1357        fastrpc_context_put(ctx);
1358
1359        return 0;
1360}
1361
1362static const struct of_device_id fastrpc_rpmsg_of_match[] = {
1363        { .compatible = "qcom,fastrpc" },
1364        { },
1365};
1366MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);
1367
1368static struct rpmsg_driver fastrpc_driver = {
1369        .probe = fastrpc_rpmsg_probe,
1370        .remove = fastrpc_rpmsg_remove,
1371        .callback = fastrpc_rpmsg_callback,
1372        .drv = {
1373                .name = "qcom,fastrpc",
1374                .of_match_table = fastrpc_rpmsg_of_match,
1375        },
1376};
1377
1378static int fastrpc_init(void)
1379{
1380        int ret;
1381
1382        ret = platform_driver_register(&fastrpc_cb_driver);
1383        if (ret < 0) {
1384                pr_err("fastrpc: failed to register cb driver\n");
1385                return ret;
1386        }
1387
1388        ret = register_rpmsg_driver(&fastrpc_driver);
1389        if (ret < 0) {
1390                pr_err("fastrpc: failed to register rpmsg driver\n");
1391                platform_driver_unregister(&fastrpc_cb_driver);
1392                return ret;
1393        }
1394
1395        return 0;
1396}
1397module_init(fastrpc_init);
1398
1399static void fastrpc_exit(void)
1400{
1401        platform_driver_unregister(&fastrpc_cb_driver);
1402        unregister_rpmsg_driver(&fastrpc_driver);
1403}
1404module_exit(fastrpc_exit);
1405
1406MODULE_LICENSE("GPL v2");
1407