linux/drivers/misc/fastrpc.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/qcom_scm.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX         4 /* adsp, mdsp, sdsp, cdsp */
#define FASTRPC_MAX_SESSIONS    13 /* 12 compute, 1 cpz */
#define FASTRPC_MAX_VMIDS       16
#define FASTRPC_ALIGN           128
#define FASTRPC_MAX_FDLIST      16
#define FASTRPC_MAX_CRCLIST     64
#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE     1
#define FASTRPC_DSP_UTILITIES_HANDLE    2
#define FASTRPC_CTXID_MASK (0xFF0)
#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME     "fastrpc"
#define ADSP_MMAP_ADD_PAGES 0x1000
#define DSP_UNSUPPORTED_API (0x80000414)
/* MAX NUMBER of DSP ATTRIBUTES SUPPORTED */
#define FASTRPC_MAX_DSP_ATTRIBUTES (256)
#define FASTRPC_MAX_DSP_ATTRIBUTES_LEN (sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)

/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)       (((sc) >> 16) & 0x0ff)

/* Retrieves number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)      (((sc) >> 8) & 0x0ff)

/* Retrieves number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)    (((sc) >> 4) & 0x0f)

/* Retrieves number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)   ((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)       (REMOTE_SCALARS_INBUFS(sc) +   \
                                         REMOTE_SCALARS_OUTBUFS(sc) +  \
                                         REMOTE_SCALARS_INHANDLES(sc) + \
                                         REMOTE_SCALARS_OUTHANDLES(sc))
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)  \
                                (((attr & 0x07) << 29) |        \
                                ((method & 0x1f) << 24) |       \
                                ((in & 0xff) << 16) |           \
                                ((out & 0xff) <<  8) |          \
                                ((oin & 0x0f) <<  4) |          \
                                (oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
                FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
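
/*
 * Worked example (illustrative): FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0)
 * packs method 7 with six input buffers into (7 << 24) | (6 << 16) = 0x07060000.
 * REMOTE_SCALARS_INBUFS() recovers the 6 from that word, and
 * REMOTE_SCALARS_LENGTH() yields the total argument count (here also 6).
 */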

#define FASTRPC_CREATE_PROCESS_NARGS    6
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH        0
#define FASTRPC_RMID_INIT_RELEASE       1
#define FASTRPC_RMID_INIT_MMAP          4
#define FASTRPC_RMID_INIT_MUNMAP        5
#define FASTRPC_RMID_INIT_CREATE        6
#define FASTRPC_RMID_INIT_CREATE_ATTR   7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
#define FASTRPC_RMID_INIT_MEM_MAP      10
#define FASTRPC_RMID_INIT_MEM_UNMAP    11

/* Protection Domain (PD) ids */
#define AUDIO_PD        (0) /* also GUEST_OS PD? */
#define USER_PD         (1)
#define SENSORS_PD      (2)

#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
                                                "sdsp", "cdsp"};
struct fastrpc_phy_page {
        u64 addr;               /* physical address */
        u64 size;               /* size of contiguous region */
};

struct fastrpc_invoke_buf {
        u32 num;                /* number of contiguous regions */
        u32 pgidx;              /* index to start of contiguous region */
};

struct fastrpc_remote_dmahandle {
        s32 fd;         /* dma handle fd */
        u32 offset;     /* dma handle offset */
        u32 len;        /* dma handle length */
};

struct fastrpc_remote_buf {
        u64 pv;         /* buffer pointer */
        u64 len;        /* length of buffer */
};

union fastrpc_remote_arg {
        struct fastrpc_remote_buf buf;
        struct fastrpc_remote_dmahandle dma;
};

struct fastrpc_mmap_rsp_msg {
        u64 vaddr;
};

struct fastrpc_mmap_req_msg {
        s32 pgid;
        u32 flags;
        u64 vaddr;
        s32 num;
};

struct fastrpc_mem_map_req_msg {
        s32 pgid;
        s32 fd;
        s32 offset;
        u32 flags;
        u64 vaddrin;
        s32 num;
        s32 data_len;
};

struct fastrpc_munmap_req_msg {
        s32 pgid;
        u64 vaddr;
        u64 size;
};

struct fastrpc_mem_unmap_req_msg {
        s32 pgid;
        s32 fd;
        u64 vaddrin;
        u64 len;
};

struct fastrpc_msg {
        int pid;                /* process group id */
        int tid;                /* thread id */
        u64 ctx;                /* invoke caller context */
        u32 handle;     /* handle to invoke */
        u32 sc;         /* scalars structure describing the data */
        u64 addr;               /* physical address */
        u64 size;               /* size of contiguous region */
};

struct fastrpc_invoke_rsp {
        u64 ctx;                /* invoke caller context */
        int retval;             /* invoke return value */
};

struct fastrpc_buf_overlap {
        u64 start;
        u64 end;
        int raix;
        u64 mstart;
        u64 mend;
        u64 offset;
};

struct fastrpc_buf {
        struct fastrpc_user *fl;
        struct dma_buf *dmabuf;
        struct device *dev;
        void *virt;
        u64 phys;
        u64 size;
        /* Lock for dma buf attachments */
        struct mutex lock;
        struct list_head attachments;
        /* mmap support */
        struct list_head node; /* list of user requested mmaps */
        uintptr_t raddr;
};

struct fastrpc_dma_buf_attachment {
        struct device *dev;
        struct sg_table sgt;
        struct list_head node;
};

struct fastrpc_map {
        struct list_head node;
        struct fastrpc_user *fl;
        int fd;
        struct dma_buf *buf;
        struct sg_table *table;
        struct dma_buf_attachment *attach;
        u64 phys;
        u64 size;
        void *va;
        u64 len;
        u64 raddr;
        u32 attr;
        struct kref refcount;
};

struct fastrpc_invoke_ctx {
        int nscalars;
        int nbufs;
        int retval;
        int pid;
        int tgid;
        u32 sc;
        u32 *crc;
        u64 ctxid;
        u64 msg_sz;
        struct kref refcount;
        struct list_head node; /* list of ctxs */
        struct completion work;
        struct work_struct put_work;
        struct fastrpc_msg msg;
        struct fastrpc_user *fl;
        union fastrpc_remote_arg *rpra;
        struct fastrpc_map **maps;
        struct fastrpc_buf *buf;
        struct fastrpc_invoke_args *args;
        struct fastrpc_buf_overlap *olaps;
        struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
        struct device *dev;
        int sid;
        bool used;
        bool valid;
};

struct fastrpc_channel_ctx {
        int domain_id;
        int sesscount;
        int vmcount;
        u32 perms;
        struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
        struct rpmsg_device *rpdev;
        struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
        spinlock_t lock;
        struct idr ctx_idr;
        struct list_head users;
        struct kref refcount;
        /* Flag if dsp attributes are cached */
        bool valid_attributes;
        u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
        struct fastrpc_device *secure_fdevice;
        struct fastrpc_device *fdevice;
        bool secure;
        bool unsigned_support;
};

struct fastrpc_device {
        struct fastrpc_channel_ctx *cctx;
        struct miscdevice miscdev;
        bool secure;
};

struct fastrpc_user {
        struct list_head user;
        struct list_head maps;
        struct list_head pending;
        struct list_head mmaps;

        struct fastrpc_channel_ctx *cctx;
        struct fastrpc_session_ctx *sctx;
        struct fastrpc_buf *init_mem;

        int tgid;
        int pd;
        bool is_secure_dev;
        /* Lock for lists */
        spinlock_t lock;
        /* lock for allocations */
        struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
        struct fastrpc_map *map;

        map = container_of(ref, struct fastrpc_map, refcount);

        if (map->table) {
                if (map->attr & FASTRPC_ATTR_SECUREMAP) {
                        struct qcom_scm_vmperm perm;
                        int err = 0;

                        perm.vmid = QCOM_SCM_VMID_HLOS;
                        perm.perm = QCOM_SCM_PERM_RWX;
                        err = qcom_scm_assign_mem(map->phys, map->size,
                                &(map->fl->cctx->vmperms[0].vmid), &perm, 1);
                        if (err) {
                                dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
                                                map->phys, map->size, err);
                                return;
                        }
                }
                dma_buf_unmap_attachment(map->attach, map->table,
                                         DMA_BIDIRECTIONAL);
                dma_buf_detach(map->buf, map->attach);
                dma_buf_put(map->buf);
        }

        kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
        if (map)
                kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
        if (map)
                kref_get(&map->refcount);
}


static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
                            struct fastrpc_map **ppmap)
{
        struct fastrpc_map *map = NULL;

        mutex_lock(&fl->mutex);
        list_for_each_entry(map, &fl->maps, node) {
                if (map->fd == fd) {
                        *ppmap = map;
                        mutex_unlock(&fl->mutex);
                        return 0;
                }
        }
        mutex_unlock(&fl->mutex);

        return -ENOENT;
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
                            struct fastrpc_map **ppmap)
{
        int ret = fastrpc_map_lookup(fl, fd, ppmap);

        if (!ret)
                fastrpc_map_get(*ppmap);

        return ret;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
        dma_free_coherent(buf->dev, buf->size, buf->virt,
                          FASTRPC_PHYS(buf->phys));
        kfree(buf);
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
                             u64 size, struct fastrpc_buf **obuf)
{
        struct fastrpc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        INIT_LIST_HEAD(&buf->attachments);
        INIT_LIST_HEAD(&buf->node);
        mutex_init(&buf->lock);

        buf->fl = fl;
        buf->virt = NULL;
        buf->phys = 0;
        buf->size = size;
        buf->dev = dev;
        buf->raddr = 0;

        buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
                                       GFP_KERNEL);
        if (!buf->virt) {
                mutex_destroy(&buf->lock);
                kfree(buf);
                return -ENOMEM;
        }

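        /*
         * Note: the SMMU session id (sid) rides in the upper 32 bits of
         * buf->phys so callers can tell sessions apart; FASTRPC_PHYS()
         * masks it back off whenever the real 32-bit address is needed
         * (e.g. in fastrpc_buf_free() above).
         */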
        if (fl->sctx && fl->sctx->sid)
                buf->phys += ((u64)fl->sctx->sid << 32);

        *obuf = buf;

        return 0;
}

static void fastrpc_channel_ctx_free(struct kref *ref)
{
        struct fastrpc_channel_ctx *cctx;

        cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

        kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
        kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
        kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
        struct fastrpc_invoke_ctx *ctx;
        struct fastrpc_channel_ctx *cctx;
        unsigned long flags;
        int i;

        ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
        cctx = ctx->cctx;

        for (i = 0; i < ctx->nbufs; i++)
                fastrpc_map_put(ctx->maps[i]);

        if (ctx->buf)
                fastrpc_buf_free(ctx->buf);

        spin_lock_irqsave(&cctx->lock, flags);
        idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
        spin_unlock_irqrestore(&cctx->lock, flags);

        kfree(ctx->maps);
        kfree(ctx->olaps);
        kfree(ctx);

        fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
        kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
        kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
        struct fastrpc_invoke_ctx *ctx =
                        container_of(work, struct fastrpc_invoke_ctx, put_work);

        fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
        struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
        struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
        /* sort with lowest starting buffer first */
        int st = CMP(pa->start, pb->start);
        /* sort with highest ending buffer first */
        int ed = CMP(pb->end, pa->end);

        return st == 0 ? ed : st;
}

static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
        u64 max_end = 0;
        int i;

        for (i = 0; i < ctx->nbufs; ++i) {
                ctx->olaps[i].start = ctx->args[i].ptr;
                ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
                ctx->olaps[i].raix = i;
        }

        sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

        for (i = 0; i < ctx->nbufs; ++i) {
                /* Falling inside previous range */
                if (ctx->olaps[i].start < max_end) {
                        ctx->olaps[i].mstart = max_end;
                        ctx->olaps[i].mend = ctx->olaps[i].end;
                        ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

                        if (ctx->olaps[i].end > max_end) {
                                max_end = ctx->olaps[i].end;
                        } else {
                                ctx->olaps[i].mend = 0;
                                ctx->olaps[i].mstart = 0;
                        }

                } else {
                        ctx->olaps[i].mend = ctx->olaps[i].end;
                        ctx->olaps[i].mstart = ctx->olaps[i].start;
                        ctx->olaps[i].offset = 0;
                        max_end = ctx->olaps[i].end;
                }
        }
}
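
/*
 * Worked example (illustrative, made-up addresses): for three buffers with
 * user ranges [0x1000, 0x1800), [0x1000, 0x1400) and [0x2000, 0x2100), the
 * sort above orders them by start ascending, end descending. The first keeps
 * mstart/mend = 0x1000/0x1800; the second falls entirely inside it, so its
 * mstart/mend collapse to 0 and it contributes no extra payload space (only
 * an offset of 0x800); the third starts beyond max_end and keeps its full
 * range. fastrpc_get_payload_size() later sums only the mend - mstart spans.
 */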

static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
                        struct fastrpc_user *user, u32 kernel, u32 sc,
                        struct fastrpc_invoke_args *args)
{
        struct fastrpc_channel_ctx *cctx = user->cctx;
        struct fastrpc_invoke_ctx *ctx = NULL;
        unsigned long flags;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ctx->node);
        ctx->fl = user;
        ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
        ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
                     REMOTE_SCALARS_OUTBUFS(sc);

        if (ctx->nscalars) {
                ctx->maps = kcalloc(ctx->nscalars,
                                    sizeof(*ctx->maps), GFP_KERNEL);
                if (!ctx->maps) {
                        kfree(ctx);
                        return ERR_PTR(-ENOMEM);
                }
                ctx->olaps = kcalloc(ctx->nscalars,
                                    sizeof(*ctx->olaps), GFP_KERNEL);
                if (!ctx->olaps) {
                        kfree(ctx->maps);
                        kfree(ctx);
                        return ERR_PTR(-ENOMEM);
                }
                ctx->args = args;
                fastrpc_get_buff_overlaps(ctx);
        }

        /* Released in fastrpc_context_put() */
        fastrpc_channel_ctx_get(cctx);

        ctx->sc = sc;
        ctx->retval = -1;
        ctx->pid = current->pid;
        ctx->tgid = user->tgid;
        ctx->cctx = cctx;
        init_completion(&ctx->work);
        INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

        spin_lock(&user->lock);
        list_add_tail(&ctx->node, &user->pending);
        spin_unlock(&user->lock);

        spin_lock_irqsave(&cctx->lock, flags);
        ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
                               FASTRPC_CTX_MAX, GFP_ATOMIC);
        if (ret < 0) {
                spin_unlock_irqrestore(&cctx->lock, flags);
                goto err_idr;
        }
        ctx->ctxid = ret << 4;
        spin_unlock_irqrestore(&cctx->lock, flags);

        kref_init(&ctx->refcount);

        return ctx;
err_idr:
        spin_lock(&user->lock);
        list_del(&ctx->node);
        spin_unlock(&user->lock);
        fastrpc_channel_ctx_put(cctx);
        kfree(ctx->maps);
        kfree(ctx->olaps);
        kfree(ctx);

        return ERR_PTR(ret);
}
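
/*
 * Note on the ctxid layout: the IDR id (1..FASTRPC_CTX_MAX - 1) is shifted
 * left by 4, leaving the low four bits of the message context free for the
 * target PD (see fastrpc_invoke_send(), which sends ctx->ctxid | fl->pd).
 * This is also why FASTRPC_CTXID_MASK is 0xFF0 and fastrpc_context_free()
 * uses ctx->ctxid >> 4 to locate the IDR entry again.
 */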

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
                    enum dma_data_direction dir)
{
        struct fastrpc_dma_buf_attachment *a = attachment->priv;
        struct sg_table *table;
        int ret;

        table = &a->sgt;

        ret = dma_map_sgtable(attachment->dev, table, dir, 0);
        if (ret)
                table = ERR_PTR(ret);
        return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *table,
                                  enum dma_data_direction dir)
{
        dma_unmap_sgtable(attach->dev, table, dir, 0);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
        struct fastrpc_buf *buffer = dmabuf->priv;

        fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
                                  struct dma_buf_attachment *attachment)
{
        struct fastrpc_dma_buf_attachment *a;
        struct fastrpc_buf *buffer = dmabuf->priv;
        int ret;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
                              FASTRPC_PHYS(buffer->phys), buffer->size);
        if (ret < 0) {
                dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
                kfree(a);
                return -EINVAL;
        }

        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->node);
        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->node, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
                                   struct dma_buf_attachment *attachment)
{
        struct fastrpc_dma_buf_attachment *a = attachment->priv;
        struct fastrpc_buf *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->node);
        mutex_unlock(&buffer->lock);
        sg_free_table(&a->sgt);
        kfree(a);
}

static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
        struct fastrpc_buf *buf = dmabuf->priv;

        iosys_map_set_vaddr(map, buf->virt);

        return 0;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
                        struct vm_area_struct *vma)
{
        struct fastrpc_buf *buf = dmabuf->priv;
        size_t size = vma->vm_end - vma->vm_start;

        return dma_mmap_coherent(buf->dev, vma, buf->virt,
                                 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
        .attach = fastrpc_dma_buf_attach,
        .detach = fastrpc_dma_buf_detach,
        .map_dma_buf = fastrpc_map_dma_buf,
        .unmap_dma_buf = fastrpc_unmap_dma_buf,
        .mmap = fastrpc_mmap,
        .vmap = fastrpc_vmap,
        .release = fastrpc_release,
};

static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
                              u64 len, u32 attr, struct fastrpc_map **ppmap)
{
        struct fastrpc_session_ctx *sess = fl->sctx;
        struct fastrpc_map *map = NULL;
        int err = 0;

        if (!fastrpc_map_find(fl, fd, ppmap))
                return 0;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
                return -ENOMEM;

        INIT_LIST_HEAD(&map->node);
        map->fl = fl;
        map->fd = fd;
        map->buf = dma_buf_get(fd);
        if (IS_ERR(map->buf)) {
                err = PTR_ERR(map->buf);
                goto get_err;
        }

        map->attach = dma_buf_attach(map->buf, sess->dev);
        if (IS_ERR(map->attach)) {
                dev_err(sess->dev, "Failed to attach dmabuf\n");
                err = PTR_ERR(map->attach);
                goto attach_err;
        }

        map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(map->table)) {
                err = PTR_ERR(map->table);
                goto map_err;
        }

        map->phys = sg_dma_address(map->table->sgl);
        map->phys += ((u64)fl->sctx->sid << 32);
        map->size = len;
        map->va = sg_virt(map->table->sgl);
        map->len = len;
        kref_init(&map->refcount);

        if (attr & FASTRPC_ATTR_SECUREMAP) {
                /*
                 * If subsystem VMIDs are defined in DTSI, then do
                 * hyp_assign from HLOS to those VM(s)
                 */
                unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);

                map->attr = attr;
                err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms,
                                fl->cctx->vmperms, fl->cctx->vmcount);
                if (err) {
                        dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
                                        map->phys, map->size, err);
                        goto map_err;
                }
        }
        spin_lock(&fl->lock);
        list_add_tail(&map->node, &fl->maps);
        spin_unlock(&fl->lock);
        *ppmap = map;

        return 0;

map_err:
        dma_buf_detach(map->buf, map->attach);
attach_err:
        dma_buf_put(map->buf);
get_err:
        kfree(map);

        return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(union fastrpc_remote_arg) |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */
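
/*
 * Worked size example (illustrative): with nscalars == 3, the metadata below
 * is 3 * (sizeof(struct fastrpc_remote_buf) + sizeof(struct fastrpc_invoke_buf)
 * + sizeof(struct fastrpc_phy_page)) = 3 * (16 + 8 + 16) = 120 bytes, plus
 * 16 * sizeof(u64) = 128 bytes of fdlist and 64 * sizeof(u32) = 256 bytes of
 * crclist, i.e. 504 bytes; fastrpc_get_payload_size() then rounds this up to
 * FASTRPC_ALIGN and appends the inline (non-fd) argument data after it.
 */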

static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
        int size = 0;

        size = (sizeof(struct fastrpc_remote_buf) +
                sizeof(struct fastrpc_invoke_buf) +
                sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
                sizeof(u64) * FASTRPC_MAX_FDLIST +
                sizeof(u32) * FASTRPC_MAX_CRCLIST;

        return size;
}

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
        u64 size = 0;
        int oix;

        size = ALIGN(metalen, FASTRPC_ALIGN);
        for (oix = 0; oix < ctx->nbufs; oix++) {
                int i = ctx->olaps[oix].raix;

                if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

                        if (ctx->olaps[oix].offset == 0)
                                size = ALIGN(size, FASTRPC_ALIGN);

                        size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
                }
        }

        return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
        struct device *dev = ctx->fl->sctx->dev;
        int i, err;

        for (i = 0; i < ctx->nscalars; ++i) {

                if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
                    ctx->args[i].length == 0)
                        continue;

                err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
                         ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
                if (err) {
                        dev_err(dev, "Error Creating map %d\n", err);
                        return -EINVAL;
                }

        }
        return 0;
}

static struct fastrpc_invoke_buf *fastrpc_invoke_buf_start(union fastrpc_remote_arg *pra, int len)
{
        return (struct fastrpc_invoke_buf *)(&pra[len]);
}

static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf *buf, int len)
{
        return (struct fastrpc_phy_page *)(&buf[len]);
}

static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
        struct device *dev = ctx->fl->sctx->dev;
        union fastrpc_remote_arg *rpra;
        struct fastrpc_invoke_buf *list;
        struct fastrpc_phy_page *pages;
        int inbufs, i, oix, err = 0;
        u64 len, rlen, pkt_size;
        u64 pg_start, pg_end;
        uintptr_t args;
        int metalen;

        inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
        metalen = fastrpc_get_meta_size(ctx);
        pkt_size = fastrpc_get_payload_size(ctx, metalen);

        err = fastrpc_create_maps(ctx);
        if (err)
                return err;

        ctx->msg_sz = pkt_size;

        err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
        if (err)
                return err;

        rpra = ctx->buf->virt;
        list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
        pages = fastrpc_phy_page_start(list, ctx->nscalars);
        args = (uintptr_t)ctx->buf->virt + metalen;
        rlen = pkt_size - metalen;
        ctx->rpra = rpra;

        for (oix = 0; oix < ctx->nbufs; ++oix) {
                int mlen;

                i = ctx->olaps[oix].raix;
                len = ctx->args[i].length;

                rpra[i].buf.pv = 0;
                rpra[i].buf.len = len;
                list[i].num = len ? 1 : 0;
                list[i].pgidx = i;

                if (!len)
                        continue;

                if (ctx->maps[i]) {
                        struct vm_area_struct *vma = NULL;

                        rpra[i].buf.pv = (u64) ctx->args[i].ptr;
                        pages[i].addr = ctx->maps[i]->phys;

                        mmap_read_lock(current->mm);
                        vma = find_vma(current->mm, ctx->args[i].ptr);
                        if (vma)
                                pages[i].addr += ctx->args[i].ptr -
                                                 vma->vm_start;
                        mmap_read_unlock(current->mm);

                        pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
                        pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
                                  PAGE_SHIFT;
                        pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

                } else {

                        if (ctx->olaps[oix].offset == 0) {
                                rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
                                args = ALIGN(args, FASTRPC_ALIGN);
                        }

                        mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

                        if (rlen < mlen)
                                goto bail;

                        rpra[i].buf.pv = args - ctx->olaps[oix].offset;
                        pages[i].addr = ctx->buf->phys -
                                        ctx->olaps[oix].offset +
                                        (pkt_size - rlen);
                        pages[i].addr = pages[i].addr & PAGE_MASK;

                        pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
                        pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
                        pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
                        args = args + mlen;
                        rlen -= mlen;
                }

                if (i < inbufs && !ctx->maps[i]) {
                        void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
                        void *src = (void *)(uintptr_t)ctx->args[i].ptr;

                        if (!kernel) {
                                if (copy_from_user(dst, (void __user *)src,
                                                   len)) {
                                        err = -EFAULT;
                                        goto bail;
                                }
                        } else {
                                memcpy(dst, src, len);
                        }
                }
        }

        for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
                list[i].num = ctx->args[i].length ? 1 : 0;
                list[i].pgidx = i;
                if (ctx->maps[i]) {
                        pages[i].addr = ctx->maps[i]->phys;
                        pages[i].size = ctx->maps[i]->size;
                }
                rpra[i].dma.fd = ctx->args[i].fd;
                rpra[i].dma.len = ctx->args[i].length;
                rpra[i].dma.offset = (u64) ctx->args[i].ptr;
        }

bail:
        if (err)
                dev_err(dev, "Error: get invoke args failed:%d\n", err);

        return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
                            u32 kernel)
{
        union fastrpc_remote_arg *rpra = ctx->rpra;
        struct fastrpc_user *fl = ctx->fl;
        struct fastrpc_map *mmap = NULL;
        struct fastrpc_invoke_buf *list;
        struct fastrpc_phy_page *pages;
        u64 *fdlist;
        int i, inbufs, outbufs, handles;

        inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
        outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
        handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
        list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
        pages = fastrpc_phy_page_start(list, ctx->nscalars);
        fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);

        for (i = inbufs; i < ctx->nbufs; ++i) {
                if (!ctx->maps[i]) {
                        void *src = (void *)(uintptr_t)rpra[i].buf.pv;
                        void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
                        u64 len = rpra[i].buf.len;

                        if (!kernel) {
                                if (copy_to_user((void __user *)dst, src, len))
                                        return -EFAULT;
                        } else {
                                memcpy(dst, src, len);
                        }
                }
        }

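        /*
         * Explanatory note: the DSP reports file descriptors it is done with
         * in the fdlist area that follows the page-info list in the metadata;
         * drop one map reference per reported fd. The list is zero-terminated,
         * hence the break on the first empty slot.
         */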
        for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
                if (!fdlist[i])
                        break;
                if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
                        fastrpc_map_put(mmap);
        }

        return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
                               struct fastrpc_invoke_ctx *ctx,
                               u32 kernel, uint32_t handle)
{
        struct fastrpc_channel_ctx *cctx;
        struct fastrpc_user *fl = ctx->fl;
        struct fastrpc_msg *msg = &ctx->msg;
        int ret;

        cctx = fl->cctx;
        msg->pid = fl->tgid;
        msg->tid = current->pid;

        if (kernel)
                msg->pid = 0;

        msg->ctx = ctx->ctxid | fl->pd;
        msg->handle = handle;
        msg->sc = ctx->sc;
        msg->addr = ctx->buf ? ctx->buf->phys : 0;
        msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
        fastrpc_context_get(ctx);

        ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));

        if (ret)
                fastrpc_context_put(ctx);

        return ret;
}

static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
                                   u32 handle, u32 sc,
                                   struct fastrpc_invoke_args *args)
{
        struct fastrpc_invoke_ctx *ctx = NULL;
        int err = 0;

        if (!fl->sctx)
                return -EINVAL;

        if (!fl->cctx->rpdev)
                return -EPIPE;

        if (handle == FASTRPC_INIT_HANDLE && !kernel) {
                dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
                return -EPERM;
        }

        ctx = fastrpc_context_alloc(fl, kernel, sc, args);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (ctx->nscalars) {
                err = fastrpc_get_args(kernel, ctx);
                if (err)
                        goto bail;
        }

        /* make sure that all CPU memory writes are seen by DSP */
        dma_wmb();
        /* Send invoke buffer to remote dsp */
        err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
        if (err)
                goto bail;

        if (kernel) {
                if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
                        err = -ETIMEDOUT;
        } else {
                err = wait_for_completion_interruptible(&ctx->work);
        }

        if (err)
                goto bail;

        /* Check the response from remote dsp */
        err = ctx->retval;
        if (err)
                goto bail;

        if (ctx->nscalars) {
                /* make sure that all memory writes by DSP are seen by CPU */
                dma_rmb();
                /* populate all the output buffers with results */
                err = fastrpc_put_args(ctx, kernel);
                if (err)
                        goto bail;
        }

bail:
        if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
                /* We are done with this compute context */
                spin_lock(&fl->lock);
                list_del(&ctx->node);
                spin_unlock(&fl->lock);
                fastrpc_context_put(ctx);
        }
        if (err)
                dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

        return err;
}

static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
{
        /* Check if the device node is non-secure and the channel is secure */
        if (!fl->is_secure_dev && fl->cctx->secure) {
                /*
                 * When the channel is configured as secure, allow untrusted
                 * applications to offload only to the unsigned PD, and block
                 * them entirely on channels that do not support unsigned PD
                 * offload.
                 */
                if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
                        dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
                        return true;
                }
        }

        return false;
}

static int fastrpc_init_create_process(struct fastrpc_user *fl,
                                        char __user *argp)
{
        struct fastrpc_init_create init;
        struct fastrpc_invoke_args *args;
        struct fastrpc_phy_page pages[1];
        struct fastrpc_map *map = NULL;
        struct fastrpc_buf *imem = NULL;
        int memlen;
        int err;
        struct {
                int pgid;
                u32 namelen;
                u32 filelen;
                u32 pageslen;
                u32 attrs;
                u32 siglen;
        } inbuf;
        u32 sc;
        bool unsigned_module = false;

        args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
        if (!args)
                return -ENOMEM;

        if (copy_from_user(&init, argp, sizeof(init))) {
                err = -EFAULT;
                goto err;
        }

        if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
                unsigned_module = true;

        if (is_session_rejected(fl, unsigned_module)) {
                err = -ECONNREFUSED;
                goto err;
        }

        if (init.filelen > INIT_FILELEN_MAX) {
                err = -EINVAL;
                goto err;
        }

        inbuf.pgid = fl->tgid;
        inbuf.namelen = strlen(current->comm) + 1;
        inbuf.filelen = init.filelen;
        inbuf.pageslen = 1;
        inbuf.attrs = init.attrs;
        inbuf.siglen = init.siglen;
        fl->pd = USER_PD;

        if (init.filelen && init.filefd) {
                err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
                if (err)
                        goto err;
        }

        memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
                       1024 * 1024);
        err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
                                &imem);
        if (err)
                goto err_alloc;

        fl->init_mem = imem;
        args[0].ptr = (u64)(uintptr_t)&inbuf;
        args[0].length = sizeof(inbuf);
        args[0].fd = -1;

        args[1].ptr = (u64)(uintptr_t)current->comm;
        args[1].length = inbuf.namelen;
        args[1].fd = -1;

        args[2].ptr = (u64) init.file;
        args[2].length = inbuf.filelen;
        args[2].fd = init.filefd;

        pages[0].addr = imem->phys;
        pages[0].size = imem->size;

        args[3].ptr = (u64)(uintptr_t) pages;
        args[3].length = 1 * sizeof(*pages);
        args[3].fd = -1;

        args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
        args[4].length = sizeof(inbuf.attrs);
        args[4].fd = -1;

        args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
        args[5].length = sizeof(inbuf.siglen);
        args[5].fd = -1;

        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
        if (init.attrs)
                sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
                                      sc, args);
        if (err)
                goto err_invoke;

        kfree(args);

        return 0;

err_invoke:
        fl->init_mem = NULL;
        fastrpc_buf_free(imem);
err_alloc:
        if (map) {
                spin_lock(&fl->lock);
                list_del(&map->node);
                spin_unlock(&fl->lock);
                fastrpc_map_put(map);
        }
err:
        kfree(args);

        return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
                                        struct fastrpc_channel_ctx *cctx)
{
        struct fastrpc_session_ctx *session = NULL;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&cctx->lock, flags);
        for (i = 0; i < cctx->sesscount; i++) {
                if (!cctx->session[i].used && cctx->session[i].valid) {
                        cctx->session[i].used = true;
                        session = &cctx->session[i];
                        break;
                }
        }
        spin_unlock_irqrestore(&cctx->lock, flags);

        return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
                                 struct fastrpc_session_ctx *session)
{
        unsigned long flags;

        spin_lock_irqsave(&cctx->lock, flags);
        session->used = false;
        spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
        struct fastrpc_invoke_args args[1];
        int tgid = 0;
        u32 sc;

        tgid = fl->tgid;
        args[0].ptr = (u64)(uintptr_t) &tgid;
        args[0].length = sizeof(tgid);
        args[0].fd = -1;
        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

        return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
                                       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
        struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
        struct fastrpc_channel_ctx *cctx = fl->cctx;
        struct fastrpc_invoke_ctx *ctx, *n;
        struct fastrpc_map *map, *m;
        struct fastrpc_buf *buf, *b;
        unsigned long flags;

        fastrpc_release_current_dsp_process(fl);

        spin_lock_irqsave(&cctx->lock, flags);
        list_del(&fl->user);
        spin_unlock_irqrestore(&cctx->lock, flags);

        if (fl->init_mem)
                fastrpc_buf_free(fl->init_mem);

        list_for_each_entry_safe(ctx, n, &fl->pending, node) {
                list_del(&ctx->node);
                fastrpc_context_put(ctx);
        }

        list_for_each_entry_safe(map, m, &fl->maps, node) {
                list_del(&map->node);
                fastrpc_map_put(map);
        }

        list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
                list_del(&buf->node);
                fastrpc_buf_free(buf);
        }

        fastrpc_session_free(cctx, fl->sctx);
        fastrpc_channel_ctx_put(cctx);

        mutex_destroy(&fl->mutex);
        kfree(fl);
        file->private_data = NULL;

        return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
        struct fastrpc_channel_ctx *cctx;
        struct fastrpc_device *fdevice;
        struct fastrpc_user *fl = NULL;
        unsigned long flags;

        fdevice = miscdev_to_fdevice(filp->private_data);
        cctx = fdevice->cctx;

        fl = kzalloc(sizeof(*fl), GFP_KERNEL);
        if (!fl)
                return -ENOMEM;

        /* Released in fastrpc_device_release() */
        fastrpc_channel_ctx_get(cctx);

        filp->private_data = fl;
        spin_lock_init(&fl->lock);
        mutex_init(&fl->mutex);
        INIT_LIST_HEAD(&fl->pending);
        INIT_LIST_HEAD(&fl->maps);
        INIT_LIST_HEAD(&fl->mmaps);
        INIT_LIST_HEAD(&fl->user);
        fl->tgid = current->tgid;
        fl->cctx = cctx;
        fl->is_secure_dev = fdevice->secure;

        fl->sctx = fastrpc_session_alloc(cctx);
        if (!fl->sctx) {
                dev_err(&cctx->rpdev->dev, "No session available\n");
                mutex_destroy(&fl->mutex);
                kfree(fl);

                return -EBUSY;
        }

        spin_lock_irqsave(&cctx->lock, flags);
        list_add_tail(&fl->user, &cctx->users);
        spin_unlock_irqrestore(&cctx->lock, flags);

        return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_alloc_dma_buf bp;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct fastrpc_buf *buf = NULL;
        int err;

        if (copy_from_user(&bp, argp, sizeof(bp)))
                return -EFAULT;

        err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
        if (err)
                return err;
        exp_info.ops = &fastrpc_dma_buf_ops;
        exp_info.size = bp.size;
        exp_info.flags = O_RDWR;
        exp_info.priv = buf;
        buf->dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(buf->dmabuf)) {
                err = PTR_ERR(buf->dmabuf);
                fastrpc_buf_free(buf);
                return err;
        }

        bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
        if (bp.fd < 0) {
                dma_buf_put(buf->dmabuf);
                return -EINVAL;
        }

        if (copy_to_user(argp, &bp, sizeof(bp))) {
                /*
                 * The usercopy failed, but we can't do much about it, as
                 * dma_buf_fd() already called fd_install() and made the
                 * file descriptor accessible for the current process. It
                 * might already be closed and dmabuf no longer valid when
                 * we reach this point. Therefore "leak" the fd and rely on
                 * the process exit path to do any required cleanup.
                 */
                return -EFAULT;
        }

        return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
        struct fastrpc_invoke_args args[1];
        int tgid = fl->tgid;
        u32 sc;

        args[0].ptr = (u64)(uintptr_t) &tgid;
        args[0].length = sizeof(tgid);
        args[0].fd = -1;
        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
        fl->pd = pd;

        return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
                                       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_invoke_args *args = NULL;
        struct fastrpc_invoke inv;
        u32 nscalars;
        int err;

        if (copy_from_user(&inv, argp, sizeof(inv)))
                return -EFAULT;

        /* nscalars is truncated here to max supported value */
        nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
        if (nscalars) {
                args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
                if (!args)
                        return -ENOMEM;

                if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
                                   nscalars * sizeof(*args))) {
                        kfree(args);
                        return -EFAULT;
                }
        }

        err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
        kfree(args);

        return err;
}

static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
                                     uint32_t dsp_attr_buf_len)
{
        struct fastrpc_invoke_args args[2] = { 0 };

        /* Capability filled in userspace */
        dsp_attr_buf[0] = 0;

        args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
        args[0].length = sizeof(dsp_attr_buf_len);
        args[0].fd = -1;
        args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
        args[1].length = dsp_attr_buf_len;
        args[1].fd = -1;
        fl->pd = USER_PD;

        return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
                                       FASTRPC_SCALARS(0, 1, 1), args);
}
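
/*
 * Note: FASTRPC_SCALARS(0, 1, 1) above encodes method 0 on the DSP utilities
 * handle with one input buffer (the requested length) and one output buffer
 * (the attribute array); the first u32 of the caller's buffer is reserved
 * for a capability that userspace fills in.
 */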

static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
                                        struct fastrpc_user *fl)
{
        struct fastrpc_channel_ctx *cctx = fl->cctx;
        uint32_t attribute_id = cap->attribute_id;
        uint32_t *dsp_attributes;
        unsigned long flags;
        uint32_t domain = cap->domain;
        int err;

        spin_lock_irqsave(&cctx->lock, flags);
        /* check if we have already queried the dsp for attributes */
        if (cctx->valid_attributes) {
                spin_unlock_irqrestore(&cctx->lock, flags);
                goto done;
        }
        spin_unlock_irqrestore(&cctx->lock, flags);

        dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
        if (!dsp_attributes)
                return -ENOMEM;

        err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
        if (err == DSP_UNSUPPORTED_API) {
                dev_info(&cctx->rpdev->dev,
                         "Warning: DSP capabilities not supported on domain: %d\n", domain);
                kfree(dsp_attributes);
                return -EOPNOTSUPP;
        } else if (err) {
                dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
                kfree(dsp_attributes);
                return err;
        }

        spin_lock_irqsave(&cctx->lock, flags);
        memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
        cctx->valid_attributes = true;
        spin_unlock_irqrestore(&cctx->lock, flags);
        kfree(dsp_attributes);
done:
        cap->capability = cctx->dsp_attributes[attribute_id];
        return 0;
}

static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_ioctl_capability cap = {0};
        int err = 0;

        if (copy_from_user(&cap, argp, sizeof(cap)))
                return -EFAULT;

        cap.capability = 0;
        if (cap.domain >= FASTRPC_DEV_MAX) {
                dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
                        cap.domain, err);
                return -ECHRNG;
        }

        /* FastRPC capabilities are not supported on the modem domain */
        if (cap.domain == MDSP_DOMAIN_ID) {
                dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
                return -ECHRNG;
        }

        if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
                dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
                        cap.attribute_id, err);
                return -EOVERFLOW;
        }

        err = fastrpc_get_info_from_kernel(&cap, fl);
        if (err)
                return err;

        if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
                return -EFAULT;

        return 0;
}

static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
                                   struct fastrpc_req_munmap *req)
{
        struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
        struct fastrpc_buf *buf = NULL, *iter, *b;
        struct fastrpc_munmap_req_msg req_msg;
        struct device *dev = fl->sctx->dev;
        int err;
        u32 sc;

        spin_lock(&fl->lock);
        list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
                if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
                        buf = iter;
                        break;
                }
        }
        spin_unlock(&fl->lock);

        if (!buf) {
                dev_err(dev, "mmap not in list\n");
                return -EINVAL;
        }

        req_msg.pgid = fl->tgid;
        req_msg.size = buf->size;
        req_msg.vaddr = buf->raddr;

        args[0].ptr = (u64) (uintptr_t) &req_msg;
        args[0].length = sizeof(req_msg);

        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
                                      &args[0]);
        if (!err) {
1640                dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
1641                spin_lock(&fl->lock);
1642                list_del(&buf->node);
1643                spin_unlock(&fl->lock);
1644                fastrpc_buf_free(buf);
1645        } else {
1646                dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
1647        }
1648
1649        return err;
1650}
1651
static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_req_munmap req;

        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;

        return fastrpc_req_munmap_impl(fl, &req);
}

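/*
 * Handler for FASTRPC_IOCTL_MMAP: allocates a kernel buffer of the
 * requested size and asks the DSP to map it, using a two-input/one-output
 * remote call (the request header and the physical page description in,
 * the remote address out). Only ADSP_MMAP_ADD_PAGES with a kernel-allocated
 * buffer is supported here; user-provided backing pages are rejected.
 */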
static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
        struct fastrpc_buf *buf = NULL;
        struct fastrpc_mmap_req_msg req_msg;
        struct fastrpc_mmap_rsp_msg rsp_msg;
        struct fastrpc_req_munmap req_unmap;
        struct fastrpc_phy_page pages;
        struct fastrpc_req_mmap req;
        struct device *dev = fl->sctx->dev;
        int err;
        u32 sc;

        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;

        if (req.flags != ADSP_MMAP_ADD_PAGES) {
                dev_err(dev, "flag not supported 0x%x\n", req.flags);
                return -EINVAL;
        }

        if (req.vaddrin) {
                dev_err(dev, "adding user allocated pages is not supported\n");
                return -EINVAL;
        }

        err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
        if (err) {
                dev_err(dev, "failed to allocate buffer\n");
                return err;
        }

        req_msg.pgid = fl->tgid;
        req_msg.flags = req.flags;
        req_msg.vaddr = req.vaddrin;
        req_msg.num = sizeof(pages);

        args[0].ptr = (u64) (uintptr_t) &req_msg;
        args[0].length = sizeof(req_msg);

        pages.addr = buf->phys;
        pages.size = buf->size;

        args[1].ptr = (u64) (uintptr_t) &pages;
        args[1].length = sizeof(pages);

        args[2].ptr = (u64) (uintptr_t) &rsp_msg;
        args[2].length = sizeof(rsp_msg);

        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
                                      &args[0]);
        if (err) {
                dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
                goto err_invoke;
        }

        /* update the buffer to be able to deallocate the memory on the DSP */
        buf->raddr = (uintptr_t) rsp_msg.vaddr;

        /* let the client know the address to use */
        req.vaddrout = rsp_msg.vaddr;

        spin_lock(&fl->lock);
        list_add_tail(&buf->node, &fl->mmaps);
        spin_unlock(&fl->lock);

        if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
                /* unmap the memory and release the buffer */
                req_unmap.vaddrout = buf->raddr;
                req_unmap.size = buf->size;
                fastrpc_req_munmap_impl(fl, &req_unmap);
                return -EFAULT;
        }

        dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
                buf->raddr, buf->size);

        return 0;

err_invoke:
        fastrpc_buf_free(buf);

        return err;
}

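/*
 * Remove a DMA-buf backed mapping created by FASTRPC_IOCTL_MEM_MAP. A
 * negative fd acts as a wildcard so the entry is matched on the remote
 * address alone; the map reference is dropped whether or not the remote
 * unmap call succeeded.
 */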
static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
        struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
        struct fastrpc_map *map = NULL, *iter, *m;
        struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
        int err = 0;
        u32 sc;
        struct device *dev = fl->sctx->dev;

        spin_lock(&fl->lock);
        list_for_each_entry_safe(iter, m, &fl->maps, node) {
                if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
                        map = iter;
                        break;
                }
        }

        spin_unlock(&fl->lock);

        if (!map) {
                dev_err(dev, "map not in list\n");
                return -EINVAL;
        }

        req_msg.pgid = fl->tgid;
        req_msg.len = map->len;
        req_msg.vaddrin = map->raddr;
        req_msg.fd = map->fd;

        args[0].ptr = (u64) (uintptr_t) &req_msg;
        args[0].length = sizeof(req_msg);

        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
                                      &args[0]);
        if (err)
                dev_err(dev, "unmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
        /* drop the map reference only after the error message has used it */
        fastrpc_map_put(map);

        return err;
}

static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_mem_unmap req;

        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;

        return fastrpc_req_mem_unmap_impl(fl, &req);
}

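/*
 * Handler for FASTRPC_IOCTL_MEM_MAP: wraps the user-supplied DMA-buf fd in
 * an SMMU mapping via fastrpc_map_create() and publishes it to the DSP with
 * FASTRPC_RMID_INIT_MEM_MAP. The remote call carries three input buffers
 * (request header, page list and a zero-length third buffer) and returns
 * the DSP-side virtual address in rsp_msg.
 */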
static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
        struct fastrpc_mem_map_req_msg req_msg = { 0 };
        struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
        struct fastrpc_mem_unmap req_unmap = { 0 };
        struct fastrpc_phy_page pages = { 0 };
        struct fastrpc_mem_map req;
        struct device *dev = fl->sctx->dev;
        struct fastrpc_map *map = NULL;
        int err;
        u32 sc;

        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;

        /* create SMMU mapping */
        err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
        if (err) {
                dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
                return err;
        }

        req_msg.pgid = fl->tgid;
        req_msg.fd = req.fd;
        req_msg.offset = req.offset;
        req_msg.vaddrin = req.vaddrin;
        map->va = (void *) (uintptr_t) req.vaddrin;
        req_msg.flags = req.flags;
        req_msg.num = sizeof(pages);
        req_msg.data_len = 0;

        args[0].ptr = (u64) (uintptr_t) &req_msg;
        args[0].length = sizeof(req_msg);

        pages.addr = map->phys;
        pages.size = map->size;

        args[1].ptr = (u64) (uintptr_t) &pages;
        args[1].length = sizeof(pages);

        args[2].ptr = (u64) (uintptr_t) &pages;
        args[2].length = 0;

        args[3].ptr = (u64) (uintptr_t) &rsp_msg;
        args[3].length = sizeof(rsp_msg);

        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
        if (err) {
                dev_err(dev, "mem map error, fd %d, vaddr %llx, size %lld\n",
                        req.fd, req.vaddrin, map->size);
                goto err_invoke;
        }

        /* update the buffer to be able to deallocate the memory on the DSP */
        map->raddr = rsp_msg.vaddr;

        /* let the client know the address to use */
        req.vaddrout = rsp_msg.vaddr;

        if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
                /* unmap the memory and release the buffer */
                req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
                req_unmap.length = map->size;
                fastrpc_req_mem_unmap_impl(fl, &req_unmap);
                return -EFAULT;
        }

        return 0;

err_invoke:
        fastrpc_map_put(map);

        return err;
}

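/*
 * ioctl() entry point for the fastrpc character devices. All commands take
 * a pointer argument; dispatch is a straight switch on the command code.
 *
 * An illustrative (untested) user-space sketch for one of the commands,
 * assuming the device node name produced by fastrpc_device_register():
 *
 *	int fd = open("/dev/fastrpc-adsp", O_RDWR);
 *	struct fastrpc_ioctl_capability cap = { .domain = 0, .attribute_id = 0 };
 *
 *	if (fd >= 0 && !ioctl(fd, FASTRPC_IOCTL_GET_DSP_INFO, &cap))
 *		printf("capability: %u\n", cap.capability);
 */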
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
        struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
        char __user *argp = (char __user *)arg;
        int err;

        switch (cmd) {
        case FASTRPC_IOCTL_INVOKE:
                err = fastrpc_invoke(fl, argp);
                break;
        case FASTRPC_IOCTL_INIT_ATTACH:
                err = fastrpc_init_attach(fl, AUDIO_PD);
                break;
        case FASTRPC_IOCTL_INIT_ATTACH_SNS:
                err = fastrpc_init_attach(fl, SENSORS_PD);
                break;
        case FASTRPC_IOCTL_INIT_CREATE:
                err = fastrpc_init_create_process(fl, argp);
                break;
        case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
                err = fastrpc_dmabuf_alloc(fl, argp);
                break;
        case FASTRPC_IOCTL_MMAP:
                err = fastrpc_req_mmap(fl, argp);
                break;
        case FASTRPC_IOCTL_MUNMAP:
                err = fastrpc_req_munmap(fl, argp);
                break;
        case FASTRPC_IOCTL_MEM_MAP:
                err = fastrpc_req_mem_map(fl, argp);
                break;
        case FASTRPC_IOCTL_MEM_UNMAP:
                err = fastrpc_req_mem_unmap(fl, argp);
                break;
        case FASTRPC_IOCTL_GET_DSP_INFO:
                err = fastrpc_get_dsp_info(fl, argp);
                break;
        default:
                err = -ENOTTY;
                break;
        }

        return err;
}

static const struct file_operations fastrpc_fops = {
        .open = fastrpc_device_open,
        .release = fastrpc_device_release,
        .unlocked_ioctl = fastrpc_device_ioctl,
        .compat_ioctl = fastrpc_device_ioctl,
};

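/*
 * Probe for a "qcom,fastrpc-compute-cb" child node: claims the next free
 * session slot in the parent channel context, optionally duplicating it
 * "qcom,nsessions" times, and restricts the context bank to 32-bit DMA.
 */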
static int fastrpc_cb_probe(struct platform_device *pdev)
{
        struct fastrpc_channel_ctx *cctx;
        struct fastrpc_session_ctx *sess;
        struct device *dev = &pdev->dev;
        unsigned long flags;
        u32 sessions = 0;
        int i, rc;

        cctx = dev_get_drvdata(dev->parent);
        if (!cctx)
                return -EINVAL;

        of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

        spin_lock_irqsave(&cctx->lock, flags);
        if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
                dev_err(dev, "too many sessions\n");
                spin_unlock_irqrestore(&cctx->lock, flags);
                return -ENOSPC;
        }
        sess = &cctx->session[cctx->sesscount++];
        sess->used = false;
        sess->valid = true;
        sess->dev = dev;
        dev_set_drvdata(dev, sess);

        if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
                dev_info(dev, "FastRPC Session ID not specified in DT\n");

        if (sessions > 0) {
                struct fastrpc_session_ctx *dup_sess;

                for (i = 1; i < sessions; i++) {
                        if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
                                break;
                        dup_sess = &cctx->session[cctx->sesscount++];
                        memcpy(dup_sess, sess, sizeof(*dup_sess));
                }
        }
        spin_unlock_irqrestore(&cctx->lock, flags);
        rc = dma_set_mask(dev, DMA_BIT_MASK(32));
        if (rc) {
                dev_err(dev, "32-bit DMA enable failed\n");
                return rc;
        }

        return 0;
}

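/*
 * Invalidate every session slot that carries this context bank's SID and
 * adjust the session count accordingly.
 */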
static int fastrpc_cb_remove(struct platform_device *pdev)
{
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
        struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
        unsigned long flags;
        int i;

        spin_lock_irqsave(&cctx->lock, flags);
        for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
                if (cctx->session[i].sid == sess->sid) {
                        cctx->session[i].valid = false;
                        cctx->sesscount--;
                }
        }
        spin_unlock_irqrestore(&cctx->lock, flags);

        return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
        { .compatible = "qcom,fastrpc-compute-cb", },
        {}
};

static struct platform_driver fastrpc_cb_driver = {
        .probe = fastrpc_cb_probe,
        .remove = fastrpc_cb_remove,
        .driver = {
                .name = "qcom,fastrpc-cb",
                .of_match_table = fastrpc_match_table,
                .suppress_bind_attrs = true,
        },
};

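/*
 * Register one misc character device for a channel. The node is named
 * fastrpc-<domain> or fastrpc-<domain>-secure and is remembered in the
 * channel context so it can be deregistered on channel removal.
 */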
static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
                                   bool is_secured, const char *domain)
{
        struct fastrpc_device *fdev;
        int err;

        fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
        if (!fdev)
                return -ENOMEM;

        fdev->secure = is_secured;
        fdev->cctx = cctx;
        fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
        fdev->miscdev.fops = &fastrpc_fops;
        fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
                                            domain, is_secured ? "-secure" : "");
        if (!fdev->miscdev.name)
                return -ENOMEM;

        err = misc_register(&fdev->miscdev);
        if (!err) {
                if (is_secured)
                        cctx->secure_fdevice = fdev;
                else
                        cctx->fdevice = fdev;
        }

        return err;
}

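/*
 * rpmsg probe for a "qcom,fastrpc" channel. The DT "label" property selects
 * the domain ("adsp", "mdsp", "sdsp" or "cdsp"), "qcom,vmids" optionally
 * lists the VM ids that memory must be shared with through SCM calls, and
 * "qcom,non-secure-domain" marks the DSP as non-secure. CDSP gets both a
 * secure and a non-secure device node so signed and unsigned PDs can be
 * offloaded.
 */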
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
        struct device *rdev = &rpdev->dev;
        struct fastrpc_channel_ctx *data;
        int i, err, domain_id = -1, vmcount;
        const char *domain;
        bool secure_dsp;
        unsigned int vmids[FASTRPC_MAX_VMIDS];

        err = of_property_read_string(rdev->of_node, "label", &domain);
        if (err) {
                dev_info(rdev, "FastRPC Domain not specified in DT\n");
                return err;
        }

        for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
                if (!strcmp(domains[i], domain)) {
                        domain_id = i;
                        break;
                }
        }

        if (domain_id < 0) {
                dev_info(rdev, "FastRPC Invalid Domain %s\n", domain);
                return -EINVAL;
        }

        vmcount = of_property_read_variable_u32_array(rdev->of_node,
                                "qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
        if (vmcount < 0)
                vmcount = 0;
        else if (!qcom_scm_is_available())
                return -EPROBE_DEFER;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        if (vmcount) {
                data->vmcount = vmcount;
                data->perms = BIT(QCOM_SCM_VMID_HLOS);
                for (i = 0; i < data->vmcount; i++) {
                        data->vmperms[i].vmid = vmids[i];
                        data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
                }
        }

        secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
        data->secure = secure_dsp;

        switch (domain_id) {
        case ADSP_DOMAIN_ID:
        case MDSP_DOMAIN_ID:
        case SDSP_DOMAIN_ID:
                /* Unsigned PD offloading is only supported on CDSP */
                data->unsigned_support = false;
                err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
                if (err)
                        goto fdev_error;
                break;
        case CDSP_DOMAIN_ID:
                data->unsigned_support = true;
                /* Create both device nodes so that we can allow both Signed and Unsigned PD */
                err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
                if (err)
                        goto fdev_error;

                err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
                if (err)
                        goto fdev_error;
                break;
        default:
                err = -EINVAL;
                goto fdev_error;
        }

        kref_init(&data->refcount);

        dev_set_drvdata(&rpdev->dev, data);
        dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
        INIT_LIST_HEAD(&data->users);
        spin_lock_init(&data->lock);
        idr_init(&data->ctx_idr);
        data->domain_id = domain_id;
        data->rpdev = rpdev;

        return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
fdev_error:
        kfree(data);
        return err;
}

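/*
 * Wake every invocation pending on this user so blocked callers can notice
 * that the channel is going away.
 */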
static void fastrpc_notify_users(struct fastrpc_user *user)
{
        struct fastrpc_invoke_ctx *ctx;

        spin_lock(&user->lock);
        list_for_each_entry(ctx, &user->pending, node)
                complete(&ctx->work);
        spin_unlock(&user->lock);
}

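/*
 * Channel teardown: unblock all users, remove the character devices and
 * the context-bank platform devices, then drop the channel reference taken
 * at probe time.
 */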
static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
        struct fastrpc_user *user;
        unsigned long flags;

        spin_lock_irqsave(&cctx->lock, flags);
        list_for_each_entry(user, &cctx->users, user)
                fastrpc_notify_users(user);
        spin_unlock_irqrestore(&cctx->lock, flags);

        if (cctx->fdevice)
                misc_deregister(&cctx->fdevice->miscdev);

        if (cctx->secure_fdevice)
                misc_deregister(&cctx->secure_fdevice->miscdev);

        of_platform_depopulate(&rpdev->dev);

        cctx->rpdev = NULL;
        fastrpc_channel_ctx_put(cctx);
}

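/*
 * rpmsg receive callback: a response carries the context id that was packed
 * into the message on send; recover it with FASTRPC_CTXID_MASK, complete
 * the matching invocation and defer the final context put to a worker,
 * since freeing DMA buffers is not allowed in interrupt context.
 */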
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
                                  int len, void *priv, u32 addr)
{
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
        struct fastrpc_invoke_rsp *rsp = data;
        struct fastrpc_invoke_ctx *ctx;
        unsigned long flags;
        unsigned long ctxid;

        if (len < sizeof(*rsp))
                return -EINVAL;

        ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

        spin_lock_irqsave(&cctx->lock, flags);
        ctx = idr_find(&cctx->ctx_idr, ctxid);
        spin_unlock_irqrestore(&cctx->lock, flags);

        if (!ctx) {
                dev_err(&rpdev->dev, "No context ID matches response\n");
                return -ENOENT;
        }

        ctx->retval = rsp->retval;
        complete(&ctx->work);

        /*
         * The DMA buffer associated with the context cannot be freed in
         * interrupt context so schedule it through a worker thread to
         * avoid a kernel BUG.
         */
        schedule_work(&ctx->put_work);

        return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
        { .compatible = "qcom,fastrpc" },
        { },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
        .probe = fastrpc_rpmsg_probe,
        .remove = fastrpc_rpmsg_remove,
        .callback = fastrpc_rpmsg_callback,
        .drv = {
                .name = "qcom,fastrpc",
                .of_match_table = fastrpc_rpmsg_of_match,
        },
};

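/*
 * Module init registers the context-bank platform driver before the rpmsg
 * driver so the compute banks can be populated as soon as a channel probes;
 * a failure to register the rpmsg driver unwinds the platform driver.
 */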
static int fastrpc_init(void)
{
        int ret;

        ret = platform_driver_register(&fastrpc_cb_driver);
        if (ret < 0) {
                pr_err("fastrpc: failed to register cb driver\n");
                return ret;
        }

        ret = register_rpmsg_driver(&fastrpc_driver);
        if (ret < 0) {
                pr_err("fastrpc: failed to register rpmsg driver\n");
                platform_driver_unregister(&fastrpc_cb_driver);
                return ret;
        }

        return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
        platform_driver_unregister(&fastrpc_cb_driver);
        unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);