// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4 /* adsp, mdsp, sdsp, cdsp */
#define FASTRPC_MAX_SESSIONS	9 /* 8 compute, 1 cpz */
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_CTXID_MASK	(0xFF0)
#define INIT_FILELEN_MAX	(2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME	"fastrpc"
#define ADSP_MMAP_ADD_PAGES	0x1000

/* Retrieves the number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves the number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves the number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves the number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +   \
					 REMOTE_SCALARS_OUTBUFS(sc) +  \
					 REMOTE_SCALARS_INHANDLES(sc) + \
					 REMOTE_SCALARS_OUTHANDLES(sc))

#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
				(((attr & 0x07) << 29) |	\
				((method & 0x1f) << 24) |	\
				((in & 0xff) << 16) |		\
				((out & 0xff) << 8) |		\
				((oin & 0x0f) << 4) |		\
				(oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
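
/*
 * Worked example of the scalars encoding above:
 * FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0) packs method 6 with four
 * input buffers and no output buffers or handles into
 * (6 << 24) | (4 << 16) = 0x06040000; the REMOTE_SCALARS_*() accessors
 * decode the same fields back out of the 32-bit word.
 */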

#define FASTRPC_CREATE_PROCESS_NARGS	6

/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_MMAP		4
#define FASTRPC_RMID_INIT_MUNMAP	5
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8

/* Protection Domain (PD) ids */
#define AUDIO_PD	(0)
#define USER_PD		(1)
#define SENSORS_PD	(2)

#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp"};

struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_arg {
	u64 pv;
	u64 len;
};

struct fastrpc_mmap_rsp_msg {
	u64 vaddr;
};

struct fastrpc_mmap_req_msg {
	s32 pgid;
	u32 flags;
	u64 vaddr;
	s32 num;
};

struct fastrpc_munmap_req_msg {
	s32 pgid;
	u64 vaddr;
	u64 size;
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
	/* entry in fastrpc_user->mmaps for DSP-mapped buffers */
	struct list_head node;
	/* remote (DSP-side) address returned by the mmap call */
	uintptr_t raddr;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct miscdevice miscdev;
	struct kref refcount;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;
	struct list_head mmaps;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
	if (map)
		kref_get(&map->refcount);
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			fastrpc_map_get(map);
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}
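
/*
 * Note on lifetime: a successful fastrpc_map_find() returns with the map's
 * refcount elevated, so every caller pairs it with fastrpc_map_put(). The
 * backing dma-buf is only detached and released in fastrpc_free_map() once
 * the last reference is dropped.
 */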

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	INIT_LIST_HEAD(&buf->node);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;
	buf->raddr = 0;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}
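
/*
 * Address encoding: fastrpc_buf::phys carries the 32-bit DMA address in its
 * low word and, when the session has a non-zero sid, the session id in bits
 * 63:32. FASTRPC_PHYS() strips the sid again wherever the raw DMA address is
 * needed (dma_free_coherent(), dma_get_sgtable(), dma_mmap_coherent()).
 */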

static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nscalars; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest mapped buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}

static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}
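
/*
 * Worked example of the overlap merge above: with two input buffers at
 * [0x1000, 0x1100) and [0x1080, 0x1200), the sorted pass keeps the first
 * range whole (mstart 0x1000, mend 0x1100, offset 0) and trims the second
 * to the non-overlapping tail (mstart 0x1100, mend 0x1200, offset 0x80),
 * so fastrpc_get_payload_size() only reserves each byte once. A buffer
 * fully contained in its predecessor ends up with mstart == mend == 0,
 * i.e. no extra payload space.
 */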

static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	/* Released in fastrpc_context_put() */
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	/* Shift the id up by four bits; the low nibble carries the PD id */
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = &a->sgt;

	/* dma_map_sgtable() returns 0 on success and a negative errno on failure */
	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
	if (ret)
		return ERR_PTR(ret);

	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sgtable(attach->dev, table, dir, 0);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}

static void *fastrpc_vmap(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	return buf->virt;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}
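
/*
 * These ops back the dma-buf exported by FASTRPC_IOCTL_ALLOC_DMA_BUFF:
 * mmap() on the returned fd lands in fastrpc_mmap() and simply remaps the
 * coherent allocation into userspace, while other devices importing the fd
 * go through attach/map_dma_buf against the same backing pages.
 */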

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detach,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};

static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(struct fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */

static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_arg) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}
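
/*
 * Concrete sizing under the definitions above: each scalar contributes
 * 16 (remote_arg) + 8 (invoke_buf) + 16 (phy_page) = 40 bytes, and the
 * fixed tail is 16 * sizeof(u64) + 64 * sizeof(u32) = 384 bytes for the
 * fd and CRC lists, so a two-scalar call needs 464 bytes of metadata
 * before the inline arguments start.
 */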

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int i;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (i = 0; i < ctx->nscalars; i++) {
		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[i].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {
		/* Make sure reserved field is set to 0 */
		if (ctx->args[i].reserved)
			return -EINVAL;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}
	}
	return 0;
}

static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
	pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
		sizeof(*rpra));
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].pv = 0;
		rpra[i].len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			/* find_vma() requires the mmap lock to be held */
			mmap_read_lock(current->mm);
			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;
			mmap_read_unlock(current->mm);

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		rpra[i].pv = (u64) ctx->args[i].ptr;
		rpra[i].len = ctx->args[i].length;
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		pages[i].addr = ctx->maps[i]->phys;
		pages[i].size = ctx->maps[i]->size;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	struct fastrpc_remote_arg *rpra = ctx->rpra;
	int i, inbufs;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		void *src = (void *)(uintptr_t)rpra[i].pv;
		void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
		u64 len = rpra[i].len;

		if (!kernel) {
			if (copy_to_user((void __user *)dst, src, len))
				return -EFAULT;
		} else {
			memcpy(dst, src, len);
		}
	}

	return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;
	int ret;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
	if (ret)
		fastrpc_context_put(ctx);

	return ret;
}
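
/*
 * msg->ctx layout: fastrpc_context_alloc() stores the IDR id in bits 4-11
 * (hence FASTRPC_CTXID_MASK == 0xFF0 and FASTRPC_CTX_MAX == 256), and
 * fastrpc_invoke_send() ORs the protection domain id into the low nibble.
 * The response handler recovers the IDR id in fastrpc_rpmsg_callback() with
 * (rsp->ctx & FASTRPC_CTXID_MASK) >> 4.
 */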

static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke message to remote processor */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	if (kernel) {
		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
			err = -ETIMEDOUT;
	} else {
		err = wait_for_completion_interruptible(&ctx->work);
	}

	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
		/* make sure that all memory writes by DSP are seen by CPU */
		dma_rmb();
		/* populate all the output buffers with results */
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
		/* We are done with this compute context */
		spin_lock(&fl->lock);
		list_del(&ctx->node);
		spin_unlock(&fl->lock);
		fastrpc_context_put(ctx);
	}
	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static int fastrpc_init_create_process(struct fastrpc_user *fl,
				       char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = USER_PD;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	if (map) {
		spin_lock(&fl->lock);
		list_del(&map->node);
		spin_unlock(&fl->lock);
		fastrpc_map_put(map);
	}
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	struct fastrpc_buf *buf, *b;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node) {
		list_del(&map->node);
		fastrpc_map_put(map);
	}

	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		list_del(&buf->node);
		fastrpc_buf_free(buf);
	}

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	/* Released in fastrpc_device_release() */
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->mmaps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		/* drop the reference taken above before bailing out */
		fastrpc_channel_ctx_put(cctx);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		/*
		 * The usercopy failed, but we can't do much about it, as
		 * dma_buf_fd() already called fd_install() and made the
		 * file descriptor accessible to the current process. It
		 * might already be closed and the dmabuf gone by the time
		 * we get here, so "leak" the fd and rely on the process
		 * exit path to do the cleanup.
		 */
		return -EFAULT;
	}

	return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = pd;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
				   struct fastrpc_req_munmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_buf *buf, *b;
	struct fastrpc_munmap_req_msg req_msg;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
			break;
		buf = NULL;
	}
	spin_unlock(&fl->lock);

	if (!buf) {
		dev_err(dev, "mmap not in list\n");
		return -EINVAL;
	}

	req_msg.pgid = fl->tgid;
	req_msg.size = buf->size;
	req_msg.vaddr = buf->raddr;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (!err) {
		dev_dbg(dev, "unmap\tpt 0x%09lx OK\n", buf->raddr);
		spin_lock(&fl->lock);
		list_del(&buf->node);
		spin_unlock(&fl->lock);
		fastrpc_buf_free(buf);
	} else {
		dev_err(dev, "unmap\tpt 0x%09lx ERROR\n", buf->raddr);
	}

	return err;
}

static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_req_munmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_munmap_impl(fl, &req);
}

static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap_req_msg req_msg;
	struct fastrpc_mmap_rsp_msg rsp_msg;
	struct fastrpc_req_munmap req_unmap;
	struct fastrpc_phy_page pages;
	struct fastrpc_req_mmap req;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	if (req.flags != ADSP_MMAP_ADD_PAGES) {
		dev_err(dev, "flag not supported 0x%x\n", req.flags);
		return -EINVAL;
	}

	if (req.vaddrin) {
		dev_err(dev, "adding user allocated pages is not supported\n");
		return -EINVAL;
	}

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
	if (err) {
		dev_err(dev, "failed to allocate buffer\n");
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.flags = req.flags;
	req_msg.vaddr = req.vaddrin;
	req_msg.num = sizeof(pages);

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = buf->phys;
	pages.size = buf->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
	args[2].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err) {
		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
		goto err_invoke;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	buf->raddr = (uintptr_t) rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	spin_lock(&fl->lock);
	list_add_tail(&buf->node, &fl->mmaps);
	spin_unlock(&fl->lock);

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		/* unmap the memory and release the buffer */
		req_unmap.vaddrout = buf->raddr;
		req_unmap.size = buf->size;
		fastrpc_req_munmap_impl(fl, &req_unmap);
		return -EFAULT;
	}

	dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
		buf->raddr, buf->size);

	return 0;

err_invoke:
	fastrpc_buf_free(buf);

	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl, AUDIO_PD);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
		err = fastrpc_init_attach(fl, SENSORS_PD);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	case FASTRPC_IOCTL_MMAP:
		err = fastrpc_req_mmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MUNMAP:
		err = fastrpc_req_munmap(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}
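
/*
 * Hedged illustration (not part of the driver): a userspace caller would
 * drive the ioctls above roughly like this, using the structures from
 * <uapi/misc/fastrpc.h>. The handle and scalars values are examples only.
 *
 *	int fd = open("/dev/fastrpc-adsp", O_RDONLY);
 *
 *	struct fastrpc_alloc_dma_buf alloc = { .size = 4096 };
 *	ioctl(fd, FASTRPC_IOCTL_ALLOC_DMA_BUFF, &alloc);
 *
 *	struct fastrpc_invoke_args arg = {
 *		.ptr = (__u64)(uintptr_t)buf,	// hypothetical local buffer
 *		.length = 4096,
 *		.fd = -1,			// inline copy, not a dma-buf
 *	};
 *	struct fastrpc_invoke inv = {
 *		.handle = remote_handle,	// hypothetical remote handle
 *		.sc = 0x01010000,		// FASTRPC_SCALARS(1, 1, 0)
 *		.args = (__u64)(uintptr_t)&arg,
 *	};
 *	ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
 */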

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};

static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	sess = &cctx->session[cctx->sesscount];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	cctx->sesscount++;
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}

static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};

static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1;
	const char *domain;

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->miscdev.minor = MISC_DYNAMIC_MINOR;
	data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
					    domains[domain_id]);
	data->miscdev.fops = &fastrpc_fops;
	err = misc_register(&data->miscdev);
	if (err) {
		kfree(data);
		return err;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
}

static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	misc_deregister(&cctx->miscdev);
	of_platform_depopulate(&rpdev->dev);

	cctx->rpdev = NULL;
	fastrpc_channel_ctx_put(cctx);
}

static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context, so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");