#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4
#define FASTRPC_MAX_SESSIONS 9
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
#define FASTRPC_MAX_CRCLIST 64
#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_CTXID_MASK (0xFF0)
#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME "fastrpc"
#define ADSP_MMAP_ADD_PAGES 0x1000

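/*
 * The scalars word (sc) packs: attrs[31:29], method[28:24],
 * #inbufs[23:16], #outbufs[15:8], #inhandles[7:4], #outhandles[3:0].
 */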
#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)

#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)

#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)

#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \
				   REMOTE_SCALARS_OUTBUFS(sc) + \
				   REMOTE_SCALARS_INHANDLES(sc) + \
				   REMOTE_SCALARS_OUTHANDLES(sc))
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
				(((attr & 0x07) << 29) | \
				((method & 0x1f) << 24) | \
				((in & 0xff) << 16) | \
				((out & 0xff) << 8) | \
				((oin & 0x0f) << 4) | \
				(oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
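/*
 * e.g. FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0) evaluates to
 * 0x06040000: method 6 with four input buffers and no outputs or handles.
 */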

#define FASTRPC_CREATE_PROCESS_NARGS 6

#define FASTRPC_RMID_INIT_ATTACH 0
#define FASTRPC_RMID_INIT_RELEASE 1
#define FASTRPC_RMID_INIT_MMAP 4
#define FASTRPC_RMID_INIT_MUNMAP 5
#define FASTRPC_RMID_INIT_CREATE 6
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8

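/* Protection domain (PD) ids, ORed into the context id sent to the DSP */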
#define AUDIO_PD (0)
#define USER_PD (1)
#define SENSORS_PD (2)

#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp"};
struct fastrpc_phy_page {
	u64 addr;
	u64 size;
};

struct fastrpc_invoke_buf {
	u32 num;
	u32 pgidx;
};

struct fastrpc_remote_arg {
	u64 pv;
	u64 len;
};

struct fastrpc_mmap_rsp_msg {
	u64 vaddr;
};

struct fastrpc_mmap_req_msg {
	s32 pgid;
	u32 flags;
	u64 vaddr;
	s32 num;
};

struct fastrpc_munmap_req_msg {
	s32 pgid;
	u64 vaddr;
	u64 size;
};

struct fastrpc_msg {
	int pid;
	int tid;
	u64 ctx;
	u32 handle;
	u32 sc;
	u64 addr;
	u64 size;
};

struct fastrpc_invoke_rsp {
	u64 ctx;
	int retval;
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;

	struct mutex lock;
	struct list_head attachments;

	struct list_head node;
	uintptr_t raddr;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node;
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct miscdevice miscdev;
	struct kref refcount;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;
	struct list_head mmaps;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;

	spinlock_t lock;

	struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
	if (map)
		kref_get(&map->refcount);
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			fastrpc_map_get(map);
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	INIT_LIST_HEAD(&buf->node);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;
	buf->raddr = 0;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}

static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nscalars; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	int st = CMP(pa->start, pb->start);
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}

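/*
 * Compute, for each buffer argument, the sub-range that does not overlap
 * an already-placed buffer (mstart/mend) and its offset into the original
 * buffer, so overlapping user buffers can share space in the inline
 * payload built by fastrpc_get_args().
 */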
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}

static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

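	/* Released in fastrpc_context_free() */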
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = &a->sgt;

	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
	if (ret)
		table = ERR_PTR(ret);
	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sgtable(attach->dev, table, dir, 0);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}

static int fastrpc_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	dma_buf_map_set_vaddr(map, buf->virt);

	return 0;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detach,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};

static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}

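/*
 * The invoke message buffer built by fastrpc_get_args() carries the
 * metadata first, followed by the inline argument payload:
 *
 *	remote args	(struct fastrpc_remote_arg, one per scalar)
 *	buffer list	(struct fastrpc_invoke_buf, one per scalar)
 *	page list	(struct fastrpc_phy_page, one per scalar)
 *	fd list		(u64 * FASTRPC_MAX_FDLIST)
 *	crc list	(u32 * FASTRPC_MAX_CRCLIST)
 *	inline copies of the non-dmabuf buffers, each aligned to
 *	FASTRPC_ALIGN
 */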
static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_arg) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int i;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (i = 0; i < ctx->nscalars; i++) {
		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[i].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {
		if (ctx->args[i].reserved)
			return -EINVAL;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}
	}
	return 0;
}

static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
	pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
		sizeof(*rpra));
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].pv = 0;
		rpra[i].len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		rpra[i].pv = (u64) ctx->args[i].ptr;
		rpra[i].len = ctx->args[i].length;
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		pages[i].addr = ctx->maps[i]->phys;
		pages[i].size = ctx->maps[i]->size;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	struct fastrpc_remote_arg *rpra = ctx->rpra;
	int i, inbufs;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		void *src = (void *)(uintptr_t)rpra[i].pv;
		void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
		u64 len = rpra[i].len;

		if (!kernel) {
			if (copy_to_user((void __user *)dst, src, len))
				return -EFAULT;
		} else {
			memcpy(dst, src, len);
		}
	}

	return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;
	int ret;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));

	if (ret)
		fastrpc_context_put(ctx);

	return ret;
}

static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	if (handle == FASTRPC_INIT_HANDLE && !kernel) {
		dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
		return -EPERM;
	}

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

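	/* make sure that all CPU memory writes are seen by DSP */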
	dma_wmb();

	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

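	/* Wait for remote dsp to respond or time out */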
	if (kernel) {
		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
			err = -ETIMEDOUT;
	} else {
		err = wait_for_completion_interruptible(&ctx->work);
	}

	if (err)
		goto bail;

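	/* Check the response from remote dsp */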
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
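		/* make sure that all memory writes by DSP are seen by CPU */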
		dma_rmb();

		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
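		/* We are done with this compute context */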
		spin_lock(&fl->lock);
		list_del(&ctx->node);
		spin_unlock(&fl->lock);
		fastrpc_context_put(ctx);
	}
	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static int fastrpc_init_create_process(struct fastrpc_user *fl,
					char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = USER_PD;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	if (map) {
		spin_lock(&fl->lock);
		list_del(&map->node);
		spin_unlock(&fl->lock);
		fastrpc_map_put(map);
	}
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	struct fastrpc_buf *buf, *b;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node) {
		list_del(&map->node);
		fastrpc_map_put(map);
	}

	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		list_del(&buf->node);
		fastrpc_buf_free(buf);
	}

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

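	/* Released in fastrpc_device_release() */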
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->mmaps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		dma_buf_put(buf->dmabuf);
		return -EFAULT;
	}

	return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = pd;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

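	/* nscalars is truncated here to max supported value */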
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
				   struct fastrpc_req_munmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_buf *buf, *b;
	struct fastrpc_munmap_req_msg req_msg;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
			break;
		buf = NULL;
	}
	spin_unlock(&fl->lock);

	if (!buf) {
		dev_err(dev, "mmap not in list\n");
		return -EINVAL;
	}

	req_msg.pgid = fl->tgid;
	req_msg.size = buf->size;
	req_msg.vaddr = buf->raddr;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (!err) {
		dev_dbg(dev, "unmap\tpt 0x%09lx OK\n", buf->raddr);
		spin_lock(&fl->lock);
		list_del(&buf->node);
		spin_unlock(&fl->lock);
		fastrpc_buf_free(buf);
	} else {
		dev_err(dev, "unmap\tpt 0x%09lx ERROR\n", buf->raddr);
	}

	return err;
}

static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_req_munmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_munmap_impl(fl, &req);
}

static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap_req_msg req_msg;
	struct fastrpc_mmap_rsp_msg rsp_msg;
	struct fastrpc_req_munmap req_unmap;
	struct fastrpc_phy_page pages;
	struct fastrpc_req_mmap req;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	if (req.flags != ADSP_MMAP_ADD_PAGES) {
		dev_err(dev, "flag not supported 0x%x\n", req.flags);
		return -EINVAL;
	}

	if (req.vaddrin) {
		dev_err(dev, "adding user allocated pages is not supported\n");
		return -EINVAL;
	}

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
	if (err) {
		dev_err(dev, "failed to allocate buffer\n");
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.flags = req.flags;
	req_msg.vaddr = req.vaddrin;
	req_msg.num = sizeof(pages);

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = buf->phys;
	pages.size = buf->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
	args[2].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err) {
		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
		goto err_invoke;
	}

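	/* update the buffer to be able to deallocate the memory on the DSP */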
	buf->raddr = (uintptr_t) rsp_msg.vaddr;

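	/* let the client know the address to use */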
	req.vaddrout = rsp_msg.vaddr;

	spin_lock(&fl->lock);
	list_add_tail(&buf->node, &fl->mmaps);
	spin_unlock(&fl->lock);

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
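		/* unmap the memory and release the buffer */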
		req_unmap.vaddrout = buf->raddr;
		req_unmap.size = buf->size;
		fastrpc_req_munmap_impl(fl, &req_unmap);
		return -EFAULT;
	}

	dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
		buf->raddr, buf->size);

	return 0;

err_invoke:
	fastrpc_buf_free(buf);

	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl, AUDIO_PD);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
		err = fastrpc_init_attach(fl, SENSORS_PD);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	case FASTRPC_IOCTL_MMAP:
		err = fastrpc_req_mmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MUNMAP:
		err = fastrpc_req_munmap(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};
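
/*
 * Illustrative sketch only (not part of this driver): one way userspace
 * might exercise the ioctls above through the uapi in <misc/fastrpc.h>.
 * The names method, remote_handle and payload are hypothetical, and the
 * authoritative struct layouts and ioctl numbers live in that header.
 * The scalars value 0x01010000 encodes method 1 with one input buffer,
 * exactly as FASTRPC_BUILD_SCALARS() above would.
 *
 *	int fd = open("/dev/fastrpc-adsp", O_RDWR);
 *	struct fastrpc_invoke_args arg = {
 *		.ptr = (__u64)(uintptr_t)&payload,
 *		.length = sizeof(payload),
 *		.fd = -1,
 *	};
 *	struct fastrpc_invoke inv = {
 *		.handle = remote_handle,
 *		.sc = 0x01010000,
 *		.args = (__u64)(uintptr_t)&arg,
 *	};
 *	ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
 */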

static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	sess = &cctx->session[cctx->sesscount];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	cctx->sesscount++;
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}

static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};

static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1;
	const char *domain;

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->miscdev.minor = MISC_DYNAMIC_MINOR;
	data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
					    domains[domain_id]);
	data->miscdev.fops = &fastrpc_fops;
	err = misc_register(&data->miscdev);
	if (err) {
		kfree(data);
		return err;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
}

static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	misc_deregister(&cctx->miscdev);
	of_platform_depopulate(&rpdev->dev);

	cctx->rpdev = NULL;
	fastrpc_channel_ctx_put(cctx);
}

static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

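	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context, so schedule the final put through a worker
	 * thread instead of dropping the reference here.
	 */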
	schedule_work(&ctx->put_work);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");