// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4
#define FASTRPC_MAX_SESSIONS	13
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_INIT_HANDLE	1
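
/*
 * A context id stores the IDR slot in bits [11:4] of the message context;
 * the low bits carry the protection domain (see fastrpc_invoke_send()), so
 * this mask recovers the slot from a response.
 */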
#define FASTRPC_CTXID_MASK	(0xFF0)
#define INIT_FILELEN_MAX	(2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME	"fastrpc"
#define ADSP_MMAP_ADD_PAGES	0x1000

/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +    \
					 REMOTE_SCALARS_OUTBUFS(sc) +   \
					 REMOTE_SCALARS_INHANDLES(sc) + \
					 REMOTE_SCALARS_OUTHANDLES(sc))
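
/*
 * Scalars layout (32 bits): attrs [31:29], method [28:24], inbufs [23:16],
 * outbufs [15:8], inhandles [7:4], outhandles [3:0].
 */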
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
					(((attr & 0x07) << 29) |	\
					((method & 0x1f) << 24) |	\
					((in & 0xff) << 16) |		\
					((out & 0xff) << 8) |		\
					((oin & 0x0f) << 4) |		\
					(oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)

#define FASTRPC_CREATE_PROCESS_NARGS	6

/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_MMAP		4
#define FASTRPC_RMID_INIT_MUNMAP	5
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8

/* Protection Domain (PD) ids */
#define AUDIO_PD	(0)
#define USER_PD		(1)
#define SENSORS_PD	(2)

#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp" };

struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_arg {
	u64 pv;
	u64 len;
};

struct fastrpc_mmap_rsp_msg {
	u64 vaddr;
};

struct fastrpc_mmap_req_msg {
	s32 pgid;
	u32 flags;
	u64 vaddr;
	s32 num;
};

struct fastrpc_munmap_req_msg {
	s32 pgid;
	u64 vaddr;
	u64 size;
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

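/*
 * Bookkeeping used to merge user buffers whose address ranges overlap:
 * mstart/mend describe the sub-range this buffer actually contributes to
 * the shared payload, raix maps back to the original argument index and
 * offset is the distance from the buffer start to the contributed range.
 */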
struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
	/* mmaped memory */
	struct list_head node;
	uintptr_t raddr;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct miscdevice miscdev;
	struct kref refcount;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;
	struct list_head mmaps;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
	if (map)
		kref_get(&map->refcount);
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			fastrpc_map_get(map);
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	INIT_LIST_HEAD(&buf->node);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;
	buf->raddr = 0;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}

static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nscalars; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}

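/*
 * Walk the buffer arguments in address order and compute, for each one,
 * the sub-range it must contribute to the shared payload: a buffer falling
 * wholly inside an earlier one contributes nothing, a partial overlap
 * contributes only the tail beyond the running maximum end.
 */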
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}

static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	/* Released in fastrpc_context_free() */
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = &a->sgt;

	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
	if (ret)
		table = ERR_PTR(ret);
	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sgtable(attach->dev, table, dir, 0);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}

static int fastrpc_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	dma_buf_map_set_vaddr(map, buf->virt);

	return 0;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detach,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};
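
/*
 * Reuse an existing mapping for @fd if one exists, otherwise pin the
 * dma-buf, attach it to the session device and map it for DMA. The
 * session id is folded into the upper 32 bits of the physical address
 * handed to the DSP.
 */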
static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(struct fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |            (0 - N)              |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */
static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_arg) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int oix;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (oix = 0; oix < ctx->nbufs; oix++) {
		int i = ctx->olaps[oix].raix;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[oix].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {
		/* Make sure reserved field is set to 0 */
		if (ctx->args[i].reserved)
			return -EINVAL;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}
	}
	return 0;
}

static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
	pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
		sizeof(*rpra));
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].pv = 0;
		rpra[i].len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			mmap_read_lock(current->mm);
			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;
			mmap_read_unlock(current->mm);

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		rpra[i].pv = (u64) ctx->args[i].ptr;
		rpra[i].len = ctx->args[i].length;
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		pages[i].addr = ctx->maps[i]->phys;
		pages[i].size = ctx->maps[i]->size;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	struct fastrpc_remote_arg *rpra = ctx->rpra;
	int i, inbufs;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		if (!ctx->maps[i]) {
			void *src = (void *)(uintptr_t)rpra[i].pv;
			void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
			u64 len = rpra[i].len;

			if (!kernel) {
				if (copy_to_user((void __user *)dst, src, len))
					return -EFAULT;
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	return 0;
}
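
/*
 * An extra reference is taken on the context before the message is handed
 * to the remote side; the rpmsg response callback owns that reference and
 * drops it from a worker. On send failure it is dropped here instead.
 */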
static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;
	int ret;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));

	if (ret)
		fastrpc_context_put(ctx);

	return ret;
}
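
/*
 * Core invoke path: marshal the arguments into a shared buffer, send the
 * message to the DSP over rpmsg, wait for the response and unmarshal any
 * output buffers back to the caller.
 */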
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	if (handle == FASTRPC_INIT_HANDLE && !kernel) {
		dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
		return -EPERM;
	}

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	if (kernel) {
		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
			err = -ETIMEDOUT;
	} else {
		err = wait_for_completion_interruptible(&ctx->work);
	}

	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
		/* make sure that all memory writes by DSP are seen by CPU */
		dma_rmb();
		/* populate all the output buffers with results */
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
		/* We are done with this compute context, remove it from pending list */
		spin_lock(&fl->lock);
		list_del(&ctx->node);
		spin_unlock(&fl->lock);
		fastrpc_context_put(ctx);
	}
	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static int fastrpc_init_create_process(struct fastrpc_user *fl,
				       char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = USER_PD;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	if (map) {
		spin_lock(&fl->lock);
		list_del(&map->node);
		spin_unlock(&fl->lock);
		fastrpc_map_put(map);
	}
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	struct fastrpc_buf *buf, *b;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node) {
		list_del(&map->node);
		fastrpc_map_put(map);
	}

	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		list_del(&buf->node);
		fastrpc_buf_free(buf);
	}

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	/* Released in fastrpc_device_release() */
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->mmaps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		/*
		 * The usercopy failed, but we can't do much about it, as
		 * dma_buf_fd() already called fd_install() and made the
		 * file descriptor accessible for the current process. It
		 * might already be closed and dmabuf no longer valid when
		 * we reach this point. Therefore "leak" the fd and rely on
		 * the process exit path to do any required cleanup.
		 */
		return -EFAULT;
	}

	return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = pd;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
				   struct fastrpc_req_munmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_buf *buf, *b;
	struct fastrpc_munmap_req_msg req_msg;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
			break;
		buf = NULL;
	}
	spin_unlock(&fl->lock);

	if (!buf) {
		dev_err(dev, "mmap not in list\n");
		return -EINVAL;
	}

	req_msg.pgid = fl->tgid;
	req_msg.size = buf->size;
	req_msg.vaddr = buf->raddr;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (!err) {
		dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
		spin_lock(&fl->lock);
		list_del(&buf->node);
		spin_unlock(&fl->lock);
		fastrpc_buf_free(buf);
	} else {
		dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
	}

	return err;
}

static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_req_munmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_munmap_impl(fl, &req);
}

static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap_req_msg req_msg;
	struct fastrpc_mmap_rsp_msg rsp_msg;
	struct fastrpc_req_munmap req_unmap;
	struct fastrpc_phy_page pages;
	struct fastrpc_req_mmap req;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	if (req.flags != ADSP_MMAP_ADD_PAGES) {
		dev_err(dev, "flag not supported 0x%x\n", req.flags);
		return -EINVAL;
	}

	if (req.vaddrin) {
		dev_err(dev, "adding user allocated pages is not supported\n");
		return -EINVAL;
	}

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
	if (err) {
		dev_err(dev, "failed to allocate buffer\n");
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.flags = req.flags;
	req_msg.vaddr = req.vaddrin;
	req_msg.num = sizeof(pages);

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = buf->phys;
	pages.size = buf->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
	args[2].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err) {
		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
		goto err_invoke;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	buf->raddr = (uintptr_t) rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	spin_lock(&fl->lock);
	list_add_tail(&buf->node, &fl->mmaps);
	spin_unlock(&fl->lock);

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		/* unmap the memory and release the buffer */
		req_unmap.vaddrout = buf->raddr;
		req_unmap.size = buf->size;
		fastrpc_req_munmap_impl(fl, &req_unmap);
		return -EFAULT;
	}

	dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
		buf->raddr, buf->size);

	return 0;

err_invoke:
	fastrpc_buf_free(buf);

	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl, AUDIO_PD);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
		err = fastrpc_init_attach(fl, SENSORS_PD);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	case FASTRPC_IOCTL_MMAP:
		err = fastrpc_req_mmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MUNMAP:
		err = fastrpc_req_munmap(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};

static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	sess = &cctx->session[cctx->sesscount];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	cctx->sesscount++;
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}

static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};

static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1;
	const char *domain;

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->miscdev.minor = MISC_DYNAMIC_MINOR;
	data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
					    domains[domain_id]);
	data->miscdev.fops = &fastrpc_fops;
	err = misc_register(&data->miscdev);
	if (err) {
		kfree(data);
		return err;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
}

static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	misc_deregister(&cctx->miscdev);
	of_platform_depopulate(&rpdev->dev);

	cctx->rpdev = NULL;
	fastrpc_channel_ctx_put(cctx);
}

static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);