linux/drivers/misc/fastrpc.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX         4 /* adsp, mdsp, sdsp, cdsp */
#define FASTRPC_MAX_SESSIONS    9 /* 8 compute, 1 cpz */
#define FASTRPC_ALIGN           128
#define FASTRPC_MAX_FDLIST      16
#define FASTRPC_MAX_CRCLIST     64
#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE     1
#define FASTRPC_CTXID_MASK (0xFF0)
#define INIT_FILELEN_MAX (64 * 1024 * 1024)
#define INIT_MEMLEN_MAX  (8 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME     "fastrpc"

/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)       (((sc) >> 16) & 0x0ff)

/* Retrieves number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)      (((sc) >> 8) & 0x0ff)

/* Retrieves number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)    (((sc) >> 4) & 0x0f)

/* Retrieves number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)   ((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)       (REMOTE_SCALARS_INBUFS(sc) +   \
                                         REMOTE_SCALARS_OUTBUFS(sc) +  \
                                         REMOTE_SCALARS_INHANDLES(sc)+ \
                                         REMOTE_SCALARS_OUTHANDLES(sc))
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)  \
                                (((attr & 0x07) << 29) |                \
                                ((method & 0x1f) << 24) |       \
                                ((in & 0xff) << 16) |           \
                                ((out & 0xff) <<  8) |          \
                                ((oin & 0x0f) <<  4) |          \
                                (oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
                FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)

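/*
 * Worked example (illustrative): FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE,
 * 4, 0) packs method 6 with four input buffers and no output buffers or
 * handles:
 *
 *   (6 & 0x1f) << 24  = 0x06000000
 *   (4 & 0xff) << 16  = 0x00040000
 *   sc                = 0x06040000
 *
 * REMOTE_SCALARS_INBUFS(0x06040000) then recovers 4, and
 * REMOTE_SCALARS_LENGTH(0x06040000) gives 4 total scalars.
 */
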
#define FASTRPC_CREATE_PROCESS_NARGS    6
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH        0
#define FASTRPC_RMID_INIT_RELEASE       1
#define FASTRPC_RMID_INIT_CREATE        6
#define FASTRPC_RMID_INIT_CREATE_ATTR   7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8

#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
                                                "sdsp", "cdsp"};
struct fastrpc_phy_page {
        u64 addr;               /* physical address */
        u64 size;               /* size of contiguous region */
};

struct fastrpc_invoke_buf {
        u32 num;                /* number of contiguous regions */
        u32 pgidx;              /* index to start of contiguous region */
};

struct fastrpc_remote_arg {
        u64 pv;
        u64 len;
};

struct fastrpc_msg {
        int pid;                /* process group id */
        int tid;                /* thread id */
        u64 ctx;                /* invoke caller context */
        u32 handle;     /* handle to invoke */
        u32 sc;         /* scalars structure describing the data */
        u64 addr;               /* physical address */
        u64 size;               /* size of contiguous region */
};

struct fastrpc_invoke_rsp {
        u64 ctx;                /* invoke caller context */
        int retval;             /* invoke return value */
};

struct fastrpc_buf_overlap {
        u64 start;              /* buffer start address */
        u64 end;                /* buffer end address */
        int raix;               /* index of the buffer in the args list */
        u64 mstart;             /* start of the non-overlapping sub-range */
        u64 mend;               /* end of the non-overlapping sub-range */
        u64 offset;             /* offset of that sub-range into the buffer */
};

struct fastrpc_buf {
        struct fastrpc_user *fl;
        struct dma_buf *dmabuf;
        struct device *dev;
        void *virt;
        u64 phys;
        u64 size;
        /* Lock for dma buf attachments */
        struct mutex lock;
        struct list_head attachments;
};

struct fastrpc_dma_buf_attachment {
        struct device *dev;
        struct sg_table sgt;
        struct list_head node;
};

struct fastrpc_map {
        struct list_head node;
        struct fastrpc_user *fl;
        int fd;
        struct dma_buf *buf;
        struct sg_table *table;
        struct dma_buf_attachment *attach;
        u64 phys;
        u64 size;
        void *va;
        u64 len;
        struct kref refcount;
};

struct fastrpc_invoke_ctx {
        int nscalars;
        int nbufs;
        int retval;
        int pid;
        int tgid;
        u32 sc;
        u32 *crc;
        u64 ctxid;
        u64 msg_sz;
        struct kref refcount;
        struct list_head node; /* list of ctxs */
        struct completion work;
        struct work_struct put_work;
        struct fastrpc_msg msg;
        struct fastrpc_user *fl;
        struct fastrpc_remote_arg *rpra;
        struct fastrpc_map **maps;
        struct fastrpc_buf *buf;
        struct fastrpc_invoke_args *args;
        struct fastrpc_buf_overlap *olaps;
        struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
        struct device *dev;
        int sid;
        bool used;
        bool valid;
};

struct fastrpc_channel_ctx {
        int domain_id;
        int sesscount;
        struct rpmsg_device *rpdev;
        struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
        spinlock_t lock;
        struct idr ctx_idr;
        struct list_head users;
        struct miscdevice miscdev;
};

struct fastrpc_user {
        struct list_head user;
        struct list_head maps;
        struct list_head pending;

        struct fastrpc_channel_ctx *cctx;
        struct fastrpc_session_ctx *sctx;
        struct fastrpc_buf *init_mem;

        int tgid;
        int pd;
        /* Lock for lists */
        spinlock_t lock;
        /* lock for allocations */
        struct mutex mutex;
};

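/*
 * Drops the dma-buf attachment and mapping once the last reference to a
 * map goes away; callers release references through fastrpc_map_put().
 */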
static void fastrpc_free_map(struct kref *ref)
{
        struct fastrpc_map *map;

        map = container_of(ref, struct fastrpc_map, refcount);

        if (map->table) {
                dma_buf_unmap_attachment(map->attach, map->table,
                                         DMA_BIDIRECTIONAL);
                dma_buf_detach(map->buf, map->attach);
                dma_buf_put(map->buf);
        }

        kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
        if (map)
                kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
        if (map)
                kref_get(&map->refcount);
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
                            struct fastrpc_map **ppmap)
{
        struct fastrpc_map *map = NULL;

        mutex_lock(&fl->mutex);
        list_for_each_entry(map, &fl->maps, node) {
                if (map->fd == fd) {
                        fastrpc_map_get(map);
                        *ppmap = map;
                        mutex_unlock(&fl->mutex);
                        return 0;
                }
        }
        mutex_unlock(&fl->mutex);

        return -ENOENT;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
        dma_free_coherent(buf->dev, buf->size, buf->virt,
                          FASTRPC_PHYS(buf->phys));
        kfree(buf);
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
                             u64 size, struct fastrpc_buf **obuf)
{
        struct fastrpc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        INIT_LIST_HEAD(&buf->attachments);
        mutex_init(&buf->lock);

        buf->fl = fl;
        buf->virt = NULL;
        buf->phys = 0;
        buf->size = size;
        buf->dev = dev;

        buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
                                       GFP_KERNEL);
        if (!buf->virt) {
                /* don't leak the tracking struct on allocation failure */
                mutex_destroy(&buf->lock);
                kfree(buf);
                return -ENOMEM;
        }

        if (fl->sctx && fl->sctx->sid)
                buf->phys += ((u64)fl->sctx->sid << 32);

        *obuf = buf;

        return 0;
}

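/*
 * An invoke context carries one reference for its creator and one extra
 * reference taken in fastrpc_invoke_send() for the in-flight message, so
 * the context (and its ID in the channel IDR) outlives whichever side
 * finishes last.
 */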
static void fastrpc_context_free(struct kref *ref)
{
        struct fastrpc_invoke_ctx *ctx;
        struct fastrpc_channel_ctx *cctx;
        unsigned long flags;
        int i;

        ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
        cctx = ctx->cctx;

        for (i = 0; i < ctx->nscalars; i++)
                fastrpc_map_put(ctx->maps[i]);

        if (ctx->buf)
                fastrpc_buf_free(ctx->buf);

        spin_lock_irqsave(&cctx->lock, flags);
        idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
        spin_unlock_irqrestore(&cctx->lock, flags);

        kfree(ctx->maps);
        kfree(ctx->olaps);
        kfree(ctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
        kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
        kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
        struct fastrpc_invoke_ctx *ctx =
                        container_of(work, struct fastrpc_invoke_ctx, put_work);

        fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
        struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
        struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
        /* sort with lowest starting buffer first */
        int st = CMP(pa->start, pb->start);
        /* sort with highest ending buffer first */
        int ed = CMP(pb->end, pa->end);

        return st == 0 ? ed : st;
}

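/*
 * Work out, for each buffer, the sub-range not already covered by a
 * buffer starting earlier, so overlapping user buffers land in the shared
 * payload only once: mstart/mend bound the unique part (both zero when a
 * buffer is wholly contained in another) and offset is how far into the
 * buffer that unique part begins.
 */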
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
        u64 max_end = 0;
        int i;

        for (i = 0; i < ctx->nbufs; ++i) {
                ctx->olaps[i].start = ctx->args[i].ptr;
                ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
                ctx->olaps[i].raix = i;
        }

        sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

        for (i = 0; i < ctx->nbufs; ++i) {
                /* Falling inside previous range */
                if (ctx->olaps[i].start < max_end) {
                        ctx->olaps[i].mstart = max_end;
                        ctx->olaps[i].mend = ctx->olaps[i].end;
                        ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

                        if (ctx->olaps[i].end > max_end) {
                                max_end = ctx->olaps[i].end;
                        } else {
                                ctx->olaps[i].mend = 0;
                                ctx->olaps[i].mstart = 0;
                        }

                } else  {
                        ctx->olaps[i].mend = ctx->olaps[i].end;
                        ctx->olaps[i].mstart = ctx->olaps[i].start;
                        ctx->olaps[i].offset = 0;
                        max_end = ctx->olaps[i].end;
                }
        }
}

static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
                        struct fastrpc_user *user, u32 kernel, u32 sc,
                        struct fastrpc_invoke_args *args)
{
        struct fastrpc_channel_ctx *cctx = user->cctx;
        struct fastrpc_invoke_ctx *ctx = NULL;
        unsigned long flags;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ctx->node);
        ctx->fl = user;
        ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
        ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
                     REMOTE_SCALARS_OUTBUFS(sc);

        if (ctx->nscalars) {
                ctx->maps = kcalloc(ctx->nscalars,
                                    sizeof(*ctx->maps), GFP_KERNEL);
                if (!ctx->maps) {
                        kfree(ctx);
                        return ERR_PTR(-ENOMEM);
                }
                ctx->olaps = kcalloc(ctx->nscalars,
                                    sizeof(*ctx->olaps), GFP_KERNEL);
                if (!ctx->olaps) {
                        kfree(ctx->maps);
                        kfree(ctx);
                        return ERR_PTR(-ENOMEM);
                }
                ctx->args = args;
                fastrpc_get_buff_overlaps(ctx);
        }

        ctx->sc = sc;
        ctx->retval = -1;
        ctx->pid = current->pid;
        ctx->tgid = user->tgid;
        ctx->cctx = cctx;
        init_completion(&ctx->work);
        INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

        spin_lock(&user->lock);
        list_add_tail(&ctx->node, &user->pending);
        spin_unlock(&user->lock);

        spin_lock_irqsave(&cctx->lock, flags);
        ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
                               FASTRPC_CTX_MAX, GFP_ATOMIC);
        if (ret < 0) {
                spin_unlock_irqrestore(&cctx->lock, flags);
                goto err_idr;
        }
        ctx->ctxid = ret << 4;
        spin_unlock_irqrestore(&cctx->lock, flags);

        kref_init(&ctx->refcount);

        return ctx;
err_idr:
        spin_lock(&user->lock);
        list_del(&ctx->node);
        spin_unlock(&user->lock);
        kfree(ctx->maps);
        kfree(ctx->olaps);
        kfree(ctx);

        return ERR_PTR(ret);
}

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
                    enum dma_data_direction dir)
{
        struct fastrpc_dma_buf_attachment *a = attachment->priv;
        struct sg_table *table;

        table = &a->sgt;

        if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
                return ERR_PTR(-ENOMEM);

        return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *table,
                                  enum dma_data_direction dir)
{
        dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
        struct fastrpc_buf *buffer = dmabuf->priv;

        fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
                                  struct dma_buf_attachment *attachment)
{
        struct fastrpc_dma_buf_attachment *a;
        struct fastrpc_buf *buffer = dmabuf->priv;
        int ret;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
                              FASTRPC_PHYS(buffer->phys), buffer->size);
        if (ret < 0) {
                dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
                kfree(a);
                return -EINVAL;
        }

        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->node);
        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->node, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
                                   struct dma_buf_attachment *attachment)
{
        struct fastrpc_dma_buf_attachment *a = attachment->priv;
        struct fastrpc_buf *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->node);
        mutex_unlock(&buffer->lock);
        kfree(a);
}

static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
{
        struct fastrpc_buf *buf = dmabuf->priv;

        return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
}

static void *fastrpc_vmap(struct dma_buf *dmabuf)
{
        struct fastrpc_buf *buf = dmabuf->priv;

        return buf->virt;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
                        struct vm_area_struct *vma)
{
        struct fastrpc_buf *buf = dmabuf->priv;
        size_t size = vma->vm_end - vma->vm_start;

        return dma_mmap_coherent(buf->dev, vma, buf->virt,
                                 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
        .attach = fastrpc_dma_buf_attach,
        .detach = fastrpc_dma_buf_detach,
        .map_dma_buf = fastrpc_map_dma_buf,
        .unmap_dma_buf = fastrpc_unmap_dma_buf,
        .mmap = fastrpc_mmap,
        .map = fastrpc_kmap,
        .vmap = fastrpc_vmap,
        .release = fastrpc_release,
};

static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
                              u64 len, struct fastrpc_map **ppmap)
{
        struct fastrpc_session_ctx *sess = fl->sctx;
        struct fastrpc_map *map = NULL;
        int err = 0;

        if (!fastrpc_map_find(fl, fd, ppmap))
                return 0;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
                return -ENOMEM;

        INIT_LIST_HEAD(&map->node);
        map->fl = fl;
        map->fd = fd;
        map->buf = dma_buf_get(fd);
        if (IS_ERR(map->buf)) {
                err = PTR_ERR(map->buf);
                goto get_err;
        }

        map->attach = dma_buf_attach(map->buf, sess->dev);
        if (IS_ERR(map->attach)) {
                dev_err(sess->dev, "Failed to attach dmabuf\n");
                err = PTR_ERR(map->attach);
                goto attach_err;
        }

        map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(map->table)) {
                err = PTR_ERR(map->table);
                goto map_err;
        }

        map->phys = sg_dma_address(map->table->sgl);
        map->phys += ((u64)fl->sctx->sid << 32);
        map->size = len;
        map->va = sg_virt(map->table->sgl);
        map->len = len;
        kref_init(&map->refcount);

        spin_lock(&fl->lock);
        list_add_tail(&map->node, &fl->maps);
        spin_unlock(&fl->lock);
        *ppmap = map;

        return 0;

map_err:
        dma_buf_detach(map->buf, map->attach);
attach_err:
        dma_buf_put(map->buf);
get_err:
        kfree(map);

        return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(struct fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */

static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
        int size = 0;

        size = (sizeof(struct fastrpc_remote_arg) +
                sizeof(struct fastrpc_invoke_buf) +
                sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
                sizeof(u64) * FASTRPC_MAX_FDLIST +
                sizeof(u32) * FASTRPC_MAX_CRCLIST;

        return size;
}

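/*
 * For illustration, with the structure sizes above (16 + 8 + 16 bytes per
 * scalar, assuming no padding on a 64-bit build) the metadata occupies
 * 40 * nscalars + 16 * 8 (fd list) + 64 * 4 (crc list) bytes, e.g. 544
 * bytes for a four-argument call.
 */
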
static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
        u64 size = 0;
        int i;

        size = ALIGN(metalen, FASTRPC_ALIGN);
        for (i = 0; i < ctx->nscalars; i++) {
                if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

                        if (ctx->olaps[i].offset == 0)
                                size = ALIGN(size, FASTRPC_ALIGN);

                        size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
                }
        }

        return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
        struct device *dev = ctx->fl->sctx->dev;
        int i, err;

        for (i = 0; i < ctx->nscalars; ++i) {
                /* Make sure reserved field is set to 0 */
                if (ctx->args[i].reserved)
                        return -EINVAL;

                if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
                    ctx->args[i].length == 0)
                        continue;

                err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
                                         ctx->args[i].length, &ctx->maps[i]);
                if (err) {
                        dev_err(dev, "Error Creating map %d\n", err);
                        return -EINVAL;
                }

        }
        return 0;
}

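/*
 * Build the payload laid out in the diagram above: one coherent buffer
 * holds the metadata plus every non-fd argument copied inline, while
 * fd-backed arguments are referenced through their DMA mappings rather
 * than copied.
 */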
static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
        struct device *dev = ctx->fl->sctx->dev;
        struct fastrpc_remote_arg *rpra;
        struct fastrpc_invoke_buf *list;
        struct fastrpc_phy_page *pages;
        int inbufs, i, oix, err = 0;
        u64 len, rlen, pkt_size;
        u64 pg_start, pg_end;
        uintptr_t args;
        int metalen;

        inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
        metalen = fastrpc_get_meta_size(ctx);
        pkt_size = fastrpc_get_payload_size(ctx, metalen);

        err = fastrpc_create_maps(ctx);
        if (err)
                return err;

        ctx->msg_sz = pkt_size;

        err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
        if (err)
                return err;

        rpra = ctx->buf->virt;
        list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
        pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
                sizeof(*rpra));
        args = (uintptr_t)ctx->buf->virt + metalen;
        rlen = pkt_size - metalen;
        ctx->rpra = rpra;

        for (oix = 0; oix < ctx->nbufs; ++oix) {
                int mlen;

                i = ctx->olaps[oix].raix;
                len = ctx->args[i].length;

                rpra[i].pv = 0;
                rpra[i].len = len;
                list[i].num = len ? 1 : 0;
                list[i].pgidx = i;

                if (!len)
                        continue;

                if (ctx->maps[i]) {
                        struct vm_area_struct *vma = NULL;

                        rpra[i].pv = (u64) ctx->args[i].ptr;
                        pages[i].addr = ctx->maps[i]->phys;

                        vma = find_vma(current->mm, ctx->args[i].ptr);
                        if (vma)
                                pages[i].addr += ctx->args[i].ptr -
                                                 vma->vm_start;

                        pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
                        pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
                                  PAGE_SHIFT;
                        pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

                } else {

                        if (ctx->olaps[oix].offset == 0) {
                                rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
                                args = ALIGN(args, FASTRPC_ALIGN);
                        }

                        mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

                        if (rlen < mlen)
                                goto bail;

                        rpra[i].pv = args - ctx->olaps[oix].offset;
                        pages[i].addr = ctx->buf->phys -
                                        ctx->olaps[oix].offset +
                                        (pkt_size - rlen);
                        pages[i].addr = pages[i].addr & PAGE_MASK;

                        pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
                        pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
                        pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
                        args = args + mlen;
                        rlen -= mlen;
                }

                if (i < inbufs && !ctx->maps[i]) {
                        void *dst = (void *)(uintptr_t)rpra[i].pv;
                        void *src = (void *)(uintptr_t)ctx->args[i].ptr;

                        if (!kernel) {
                                if (copy_from_user(dst, (void __user *)src,
                                                   len)) {
                                        err = -EFAULT;
                                        goto bail;
                                }
                        } else {
                                memcpy(dst, src, len);
                        }
                }
        }

        for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
                rpra[i].pv = (u64) ctx->args[i].ptr;
                rpra[i].len = ctx->args[i].length;
                list[i].num = ctx->args[i].length ? 1 : 0;
                list[i].pgidx = i;
                pages[i].addr = ctx->maps[i]->phys;
                pages[i].size = ctx->maps[i]->size;
        }

bail:
        if (err)
                dev_err(dev, "Error: get invoke args failed:%d\n", err);

        return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
                            u32 kernel)
{
        struct fastrpc_remote_arg *rpra = ctx->rpra;
        int i, inbufs;

        inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);

        for (i = inbufs; i < ctx->nbufs; ++i) {
                void *src = (void *)(uintptr_t)rpra[i].pv;
                void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
                u64 len = rpra[i].len;

                if (!kernel) {
                        if (copy_to_user((void __user *)dst, src, len))
                                return -EFAULT;
                } else {
                        memcpy(dst, src, len);
                }
        }

        return 0;
}

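/*
 * The low bits of msg->ctx carry the protection-domain selector (fl->pd)
 * next to the IDR-based context ID, which fastrpc_context_alloc() already
 * shifted left by four; the response handler masks with FASTRPC_CTXID_MASK
 * to undo this before looking the context up again.
 */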
static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
                               struct fastrpc_invoke_ctx *ctx,
                               u32 kernel, uint32_t handle)
{
        struct fastrpc_channel_ctx *cctx;
        struct fastrpc_user *fl = ctx->fl;
        struct fastrpc_msg *msg = &ctx->msg;

        cctx = fl->cctx;
        msg->pid = fl->tgid;
        msg->tid = current->pid;

        if (kernel)
                msg->pid = 0;

        msg->ctx = ctx->ctxid | fl->pd;
        msg->handle = handle;
        msg->sc = ctx->sc;
        msg->addr = ctx->buf ? ctx->buf->phys : 0;
        msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
        fastrpc_context_get(ctx);

        return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
}

static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
                                   u32 handle, u32 sc,
                                   struct fastrpc_invoke_args *args)
{
        struct fastrpc_invoke_ctx *ctx = NULL;
        int err = 0;

        if (!fl->sctx)
                return -EINVAL;

        ctx = fastrpc_context_alloc(fl, kernel, sc, args);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (ctx->nscalars) {
                err = fastrpc_get_args(kernel, ctx);
                if (err)
                        goto bail;
        }

        /* make sure that all CPU memory writes are seen by DSP */
        dma_wmb();
        /* Send invoke buffer to remote dsp */
        err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
        if (err)
                goto bail;

        /* Wait for the remote dsp to respond or for the wait to be interrupted */
        err = wait_for_completion_interruptible(&ctx->work);
        if (err)
                goto bail;

        /* Check the response from remote dsp */
        err = ctx->retval;
        if (err)
                goto bail;

        if (ctx->nscalars) {
                /* make sure that all memory writes by DSP are seen by CPU */
                dma_rmb();
                /* populate all the output buffers with results */
                err = fastrpc_put_args(ctx, kernel);
                if (err)
                        goto bail;
        }

bail:
        /* We are done with this compute context, remove it from pending list */
        spin_lock(&fl->lock);
        list_del(&ctx->node);
        spin_unlock(&fl->lock);
        fastrpc_context_put(ctx);

        if (err)
                dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

        return err;
}

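/*
 * Spawn a new user protection domain on the DSP: map the shell ELF handed
 * in via init.filefd, donate a region of kernel memory (imem) for the
 * remote process, and invoke the CREATE (or CREATE_ATTR) method on the
 * init handle.
 */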
static int fastrpc_init_create_process(struct fastrpc_user *fl,
                                        char __user *argp)
{
        struct fastrpc_init_create init;
        struct fastrpc_invoke_args *args;
        struct fastrpc_phy_page pages[1];
        struct fastrpc_map *map = NULL;
        struct fastrpc_buf *imem = NULL;
        int memlen;
        int err;
        struct {
                int pgid;
                u32 namelen;
                u32 filelen;
                u32 pageslen;
                u32 attrs;
                u32 siglen;
        } inbuf;
        u32 sc;

        args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
        if (!args)
                return -ENOMEM;

        if (copy_from_user(&init, argp, sizeof(init))) {
                err = -EFAULT;
                goto err;
        }

        if (init.filelen > INIT_FILELEN_MAX) {
                err = -EINVAL;
                goto err;
        }

        inbuf.pgid = fl->tgid;
        inbuf.namelen = strlen(current->comm) + 1;
        inbuf.filelen = init.filelen;
        inbuf.pageslen = 1;
        inbuf.attrs = init.attrs;
        inbuf.siglen = init.siglen;
        fl->pd = 1;

        if (init.filelen && init.filefd) {
                err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
                if (err)
                        goto err;
        }

        memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
                       1024 * 1024);
        err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
                                &imem);
        if (err)
                goto err_alloc;

        fl->init_mem = imem;
        args[0].ptr = (u64)(uintptr_t)&inbuf;
        args[0].length = sizeof(inbuf);
        args[0].fd = -1;

        args[1].ptr = (u64)(uintptr_t)current->comm;
        args[1].length = inbuf.namelen;
        args[1].fd = -1;

        args[2].ptr = (u64) init.file;
        args[2].length = inbuf.filelen;
        args[2].fd = init.filefd;

        pages[0].addr = imem->phys;
        pages[0].size = imem->size;

        args[3].ptr = (u64)(uintptr_t) pages;
        args[3].length = 1 * sizeof(*pages);
        args[3].fd = -1;

        args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
        args[4].length = sizeof(inbuf.attrs);
        args[4].fd = -1;

        args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
        args[5].length = sizeof(inbuf.siglen);
        args[5].fd = -1;

        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
        if (init.attrs)
                sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
                                      sc, args);
        if (err)
                goto err_invoke;

        kfree(args);

        return 0;

err_invoke:
        fl->init_mem = NULL;
        fastrpc_buf_free(imem);
err_alloc:
        if (map) {
                spin_lock(&fl->lock);
                list_del(&map->node);
                spin_unlock(&fl->lock);
                fastrpc_map_put(map);
        }
err:
        kfree(args);

        return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
                                        struct fastrpc_channel_ctx *cctx)
{
        struct fastrpc_session_ctx *session = NULL;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&cctx->lock, flags);
        for (i = 0; i < cctx->sesscount; i++) {
                if (!cctx->session[i].used && cctx->session[i].valid) {
                        cctx->session[i].used = true;
                        session = &cctx->session[i];
                        break;
                }
        }
        spin_unlock_irqrestore(&cctx->lock, flags);

        return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
                                 struct fastrpc_session_ctx *session)
{
        unsigned long flags;

        spin_lock_irqsave(&cctx->lock, flags);
        session->used = false;
        spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
        struct fastrpc_invoke_args args[1];
        int tgid = 0;
        u32 sc;

        tgid = fl->tgid;
        args[0].ptr = (u64)(uintptr_t) &tgid;
        args[0].length = sizeof(tgid);
        args[0].fd = -1;
        args[0].reserved = 0;
        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

        return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
                                       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
        struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
        struct fastrpc_channel_ctx *cctx = fl->cctx;
        struct fastrpc_invoke_ctx *ctx, *n;
        struct fastrpc_map *map, *m;
        unsigned long flags;

        fastrpc_release_current_dsp_process(fl);

        spin_lock_irqsave(&cctx->lock, flags);
        list_del(&fl->user);
        spin_unlock_irqrestore(&cctx->lock, flags);

        if (fl->init_mem)
                fastrpc_buf_free(fl->init_mem);

        list_for_each_entry_safe(ctx, n, &fl->pending, node) {
                list_del(&ctx->node);
                fastrpc_context_put(ctx);
        }

        list_for_each_entry_safe(map, m, &fl->maps, node) {
                list_del(&map->node);
                fastrpc_map_put(map);
        }

        fastrpc_session_free(cctx, fl->sctx);

        mutex_destroy(&fl->mutex);
        kfree(fl);
        file->private_data = NULL;

        return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
        struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
        struct fastrpc_user *fl = NULL;
        unsigned long flags;

        fl = kzalloc(sizeof(*fl), GFP_KERNEL);
        if (!fl)
                return -ENOMEM;

        filp->private_data = fl;
        spin_lock_init(&fl->lock);
        mutex_init(&fl->mutex);
        INIT_LIST_HEAD(&fl->pending);
        INIT_LIST_HEAD(&fl->maps);
        INIT_LIST_HEAD(&fl->user);
        fl->tgid = current->tgid;
        fl->cctx = cctx;

        fl->sctx = fastrpc_session_alloc(cctx);
        if (!fl->sctx) {
                dev_err(&cctx->rpdev->dev, "No session available\n");
                mutex_destroy(&fl->mutex);
                kfree(fl);

                return -EBUSY;
        }

        spin_lock_irqsave(&cctx->lock, flags);
        list_add_tail(&fl->user, &cctx->users);
        spin_unlock_irqrestore(&cctx->lock, flags);

        return 0;
}

static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
{
        struct dma_buf *buf;
        int info;

        if (copy_from_user(&info, argp, sizeof(info)))
                return -EFAULT;

        buf = dma_buf_get(info);
        if (IS_ERR_OR_NULL(buf))
                return -EINVAL;
        /*
         * one for the last get and other for the ALLOC_DMA_BUFF ioctl
         */
        dma_buf_put(buf);
        dma_buf_put(buf);

        return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_alloc_dma_buf bp;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct fastrpc_buf *buf = NULL;
        int err;

        if (copy_from_user(&bp, argp, sizeof(bp)))
                return -EFAULT;

        err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
        if (err)
                return err;
        exp_info.ops = &fastrpc_dma_buf_ops;
        exp_info.size = bp.size;
        exp_info.flags = O_RDWR;
        exp_info.priv = buf;
        buf->dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(buf->dmabuf)) {
                err = PTR_ERR(buf->dmabuf);
                fastrpc_buf_free(buf);
                return err;
        }

        bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
        if (bp.fd < 0) {
                dma_buf_put(buf->dmabuf);
                return -EINVAL;
        }

        if (copy_to_user(argp, &bp, sizeof(bp))) {
                dma_buf_put(buf->dmabuf);
                return -EFAULT;
        }

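        /*
         * Take an extra reference: fastrpc_dmabuf_free() drops two (its
         * own dma_buf_get() and this one), leaving the fd installed above
         * as the remaining owner.
         */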
        get_dma_buf(buf->dmabuf);

        return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl)
{
        struct fastrpc_invoke_args args[1];
        int tgid = fl->tgid;
        u32 sc;

        args[0].ptr = (u64)(uintptr_t) &tgid;
        args[0].length = sizeof(tgid);
        args[0].fd = -1;
        args[0].reserved = 0;
        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
        fl->pd = 0;

        return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
                                       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_invoke_args *args = NULL;
        struct fastrpc_invoke inv;
        u32 nscalars;
        int err;

        if (copy_from_user(&inv, argp, sizeof(inv)))
                return -EFAULT;

        /* nscalars is truncated here to max supported value */
        nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
        if (nscalars) {
                args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
                if (!args)
                        return -ENOMEM;

                if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
                                   nscalars * sizeof(*args))) {
                        kfree(args);
                        return -EFAULT;
                }
        }

        err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
        kfree(args);

        return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
        struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
        char __user *argp = (char __user *)arg;
        int err;

        switch (cmd) {
        case FASTRPC_IOCTL_INVOKE:
                err = fastrpc_invoke(fl, argp);
                break;
        case FASTRPC_IOCTL_INIT_ATTACH:
                err = fastrpc_init_attach(fl);
                break;
        case FASTRPC_IOCTL_INIT_CREATE:
                err = fastrpc_init_create_process(fl, argp);
                break;
        case FASTRPC_IOCTL_FREE_DMA_BUFF:
                err = fastrpc_dmabuf_free(fl, argp);
                break;
        case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
                err = fastrpc_dmabuf_alloc(fl, argp);
                break;
        default:
                err = -ENOTTY;
                break;
        }

        return err;
}

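/*
 * Userspace usage sketch (illustrative only, not part of the driver;
 * "args" below stands for a caller-built fastrpc_invoke_args array and
 * the method/buffer counts are hypothetical):
 *
 *   int fd = open("/dev/fastrpc-adsp", O_RDONLY);
 *   ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH);     // attach to the guest OS PD
 *   struct fastrpc_invoke inv = {
 *           .handle = FASTRPC_INIT_HANDLE,
 *           .sc = FASTRPC_SCALARS(0, 0, 1),   // method 0, one output buf
 *           .args = (__u64)(uintptr_t)args,
 *   };
 *   ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
 */
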
static const struct file_operations fastrpc_fops = {
        .open = fastrpc_device_open,
        .release = fastrpc_device_release,
        .unlocked_ioctl = fastrpc_device_ioctl,
        .compat_ioctl = fastrpc_device_ioctl,
};

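/*
 * Each compute context bank ("cb") child node contributes sessions to the
 * channel; the optional qcom,nsessions property lets one context bank be
 * exposed as several identical sessions.
 */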
static int fastrpc_cb_probe(struct platform_device *pdev)
{
        struct fastrpc_channel_ctx *cctx;
        struct fastrpc_session_ctx *sess;
        struct device *dev = &pdev->dev;
        int i, sessions = 0;
        unsigned long flags;
        int rc;

        cctx = dev_get_drvdata(dev->parent);
        if (!cctx)
                return -EINVAL;

        of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

        spin_lock_irqsave(&cctx->lock, flags);
        sess = &cctx->session[cctx->sesscount];
        sess->used = false;
        sess->valid = true;
        sess->dev = dev;
        dev_set_drvdata(dev, sess);

        if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
                dev_info(dev, "FastRPC Session ID not specified in DT\n");

        if (sessions > 0) {
                struct fastrpc_session_ctx *dup_sess;

                for (i = 1; i < sessions; i++) {
                        if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
                                break;
                        dup_sess = &cctx->session[cctx->sesscount];
                        memcpy(dup_sess, sess, sizeof(*dup_sess));
                }
        }
        cctx->sesscount++;
        spin_unlock_irqrestore(&cctx->lock, flags);
        rc = dma_set_mask(dev, DMA_BIT_MASK(32));
        if (rc) {
                dev_err(dev, "32-bit DMA enable failed\n");
                return rc;
        }

        return 0;
}

static int fastrpc_cb_remove(struct platform_device *pdev)
{
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
        struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
        unsigned long flags;
        int i;

        spin_lock_irqsave(&cctx->lock, flags);
        for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
                if (cctx->session[i].sid == sess->sid) {
                        cctx->session[i].valid = false;
                        cctx->sesscount--;
                }
        }
        spin_unlock_irqrestore(&cctx->lock, flags);

        return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
        { .compatible = "qcom,fastrpc-compute-cb", },
        {}
};

static struct platform_driver fastrpc_cb_driver = {
        .probe = fastrpc_cb_probe,
        .remove = fastrpc_cb_remove,
        .driver = {
                .name = "qcom,fastrpc-cb",
                .of_match_table = fastrpc_match_table,
                .suppress_bind_attrs = true,
        },
};

static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
        struct device *rdev = &rpdev->dev;
        struct fastrpc_channel_ctx *data;
        int i, err, domain_id = -1;
        const char *domain;

        data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        err = of_property_read_string(rdev->of_node, "label", &domain);
        if (err) {
                dev_info(rdev, "FastRPC Domain not specified in DT\n");
                return err;
        }

        for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
                if (!strcmp(domains[i], domain)) {
                        domain_id = i;
                        break;
                }
        }

        if (domain_id < 0) {
                dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
                return -EINVAL;
        }

        data->miscdev.minor = MISC_DYNAMIC_MINOR;
        data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
                                       domains[domain_id]);
        if (!data->miscdev.name)
                return -ENOMEM;
        data->miscdev.fops = &fastrpc_fops;
        err = misc_register(&data->miscdev);
        if (err)
                return err;

        dev_set_drvdata(&rpdev->dev, data);
        dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
        INIT_LIST_HEAD(&data->users);
        spin_lock_init(&data->lock);
        idr_init(&data->ctx_idr);
        data->domain_id = domain_id;
        data->rpdev = rpdev;

        return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
}

static void fastrpc_notify_users(struct fastrpc_user *user)
{
        struct fastrpc_invoke_ctx *ctx;

        spin_lock(&user->lock);
        list_for_each_entry(ctx, &user->pending, node)
                complete(&ctx->work);
        spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
        struct fastrpc_user *user;
        unsigned long flags;

        spin_lock_irqsave(&cctx->lock, flags);
        list_for_each_entry(user, &cctx->users, user)
                fastrpc_notify_users(user);
        spin_unlock_irqrestore(&cctx->lock, flags);

        misc_deregister(&cctx->miscdev);
        of_platform_depopulate(&rpdev->dev);
        /* cctx is devm-allocated in probe; freeing it here would double free */
}

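/*
 * rpmsg rx path: recover the context ID that fastrpc_invoke_send() packed
 * into msg->ctx and wake the invoking thread waiting on ctx->work.
 */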
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
                                  int len, void *priv, u32 addr)
{
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
        struct fastrpc_invoke_rsp *rsp = data;
        struct fastrpc_invoke_ctx *ctx;
        unsigned long flags;
        unsigned long ctxid;

        if (len < sizeof(*rsp))
                return -EINVAL;

        ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

        spin_lock_irqsave(&cctx->lock, flags);
        ctx = idr_find(&cctx->ctx_idr, ctxid);
        spin_unlock_irqrestore(&cctx->lock, flags);

        if (!ctx) {
                dev_err(&rpdev->dev, "No context ID matches response\n");
                return -ENOENT;
        }

        ctx->retval = rsp->retval;
        complete(&ctx->work);

        /*
         * The DMA buffer associated with the context cannot be freed in
         * interrupt context so schedule it through a worker thread to
         * avoid a kernel BUG.
         */
        schedule_work(&ctx->put_work);

        return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
        { .compatible = "qcom,fastrpc" },
        { },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
        .probe = fastrpc_rpmsg_probe,
        .remove = fastrpc_rpmsg_remove,
        .callback = fastrpc_rpmsg_callback,
        .drv = {
                .name = "qcom,fastrpc",
                .of_match_table = fastrpc_rpmsg_of_match,
        },
};

static int fastrpc_init(void)
{
        int ret;

        ret = platform_driver_register(&fastrpc_cb_driver);
        if (ret < 0) {
                pr_err("fastrpc: failed to register cb driver\n");
                return ret;
        }

        ret = register_rpmsg_driver(&fastrpc_driver);
        if (ret < 0) {
                pr_err("fastrpc: failed to register rpmsg driver\n");
                platform_driver_unregister(&fastrpc_cb_driver);
                return ret;
        }

        return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
        platform_driver_unregister(&fastrpc_cb_driver);
        unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");