#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "optee_rpc_cmd.h"
#include <linux/kmemleak.h>
#define CREATE_TRACE_POINTS
#include "optee_trace.h"
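
/*
 * This file implements the SMC ABI used when communicating with secure
 * world OP-TEE OS via raw SMCs.
 * It is divided into the following sections:
 * 1. Convert between struct tee_param and struct optee_msg_param
 * 2. Low level support functions to register shared memory in secure world
 * 3. Dynamic shared memory pool based on alloc_pages()
 * 4. Do a normal scheduled call into secure world
 * 5. Driver initialization
 */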

#define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES

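/*
 * 1. Convert between struct tee_param and struct optee_msg_param
 *
 * optee_from_msg_param() and optee_to_msg_param() are the main functions,
 * the *_tmp_mem() and *_reg_mem() helpers below deal with temporary and
 * registered memory references respectively.
 */
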
static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
				  const struct optee_msg_param *mp)
{
	struct tee_shm *shm;
	phys_addr_t pa;
	int rc;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	p->u.memref.size = mp->u.tmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
	if (!shm) {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
		return 0;
	}

	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		return rc;

	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
	p->u.memref.shm = shm;

	/* Check that the memref is covered by the shm object */
	if (p->u.memref.size) {
		size_t o = p->u.memref.shm_offs +
			   p->u.memref.size - 1;

		rc = tee_shm_get_pa(shm, o, NULL);
		if (rc)
			return rc;
	}

	return 0;
}

static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
				   const struct optee_msg_param *mp)
{
	struct tee_shm *shm;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	p->u.memref.size = mp->u.rmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;

	if (shm) {
		p->u.memref.shm_offs = mp->u.rmem.offs;
		p->u.memref.shm = shm;
	} else {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
	}
}

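/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @optee:	main service struct
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 *
 * Returns 0 on success or <0 on failure
 */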
static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
				size_t num_params,
				const struct optee_msg_param *msg_params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			optee_from_msg_param_value(p, attr, mp);
			break;
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			rc = from_msg_param_tmp_mem(p, attr, mp);
			if (rc)
				return rc;
			break;
		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
			from_msg_param_reg_mem(p, attr, mp);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	int rc;
	phys_addr_t pa;

	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.tmem.size = p->u.memref.size;

	if (!p->u.memref.shm) {
		mp->u.tmem.buf_ptr = 0;
		return 0;
	}

	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
	if (rc)
		return rc;

	mp->u.tmem.buf_ptr = pa;
	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
		    OPTEE_MSG_ATTR_CACHE_SHIFT;

	return 0;
}

static int to_msg_param_reg_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.rmem.size = p->u.memref.size;
	mp->u.rmem.offs = p->u.memref.shm_offs;
	return 0;
}

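/**
 * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG
 *			  parameters
 * @optee:	main service struct
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 *
 * Returns 0 on success or <0 on failure
 */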
static int optee_to_msg_param(struct optee *optee,
			      struct optee_msg_param *msg_params,
			      size_t num_params, const struct tee_param *params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = OPTEE_MSG_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			optee_to_msg_param_value(mp, p);
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (tee_shm_is_registered(p->u.memref.shm))
				rc = to_msg_param_reg_mem(mp, p);
			else
				rc = to_msg_param_tmp_mem(mp, p);
			if (rc)
				return rc;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

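/*
 * 2. Low level support functions to register shared memory in secure world
 *
 * Functions to enable/disable shared memory caching in secure world, that
 * is, lazy freeing of previously allocated shared memory. Freeing is
 * performed when a request has been completed.
 *
 * Functions to register and unregister shared memory both for normal
 * clients and for tee-supplicant.
 */

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *			      allocations in OP-TEE
 * @optee:	main service struct
 */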
static void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

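/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocations in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by
 *		this kernel and are safe to dereference
 */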
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 *			       allocations in OP-TEE
 * @optee:	main service struct
 */
static void optee_disable_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, true);
}

/**
 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 *					allocations in OP-TEE which are not
 *					currently mapped
 * @optee:	main service struct
 */
static void optee_disable_unmapped_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, false);
}

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

static u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

static void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}

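/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold list of user page addresses and
 *	links to the next pages of buffer
 */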
static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
				  size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are
	 * no known ARM architectures with a page size smaller than 4k.
	 * The build assert below looks redundant, but the following code
	 * relies heavily on this assumption, so better safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If the linux page is bigger than 4k and the user buffer offset
	 * is larger than 4k/8k/12k/etc this skips the first 4k chunks,
	 * because they carry no data of value for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}

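/*
 * optee_shm_register() - registers shared memory with secure world
 *
 * The pages backing @shm are described to OP-TEE with a non-contiguous
 * page list (OPTEE_MSG_ATTR_NONCONTIG) passed via the
 * OPTEE_MSG_CMD_REGISTER_SHM command.
 */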
static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			      struct page **pages, size_t num_pages,
			      unsigned long start)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	u64 *pages_list;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = optee_check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we store
	 * the buffer offset from the 4k page, as the internal buffer is
	 * 4k page aligned. The pages list is stored after the buffer
	 * offset.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
		(tee_shm_get_page_offset(shm) &
		 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}

static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	int rc = 0;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
				   struct page **pages, size_t num_pages,
				   unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead information about it will be passed in RPC code.
	 */
	return optee_check_mem_type(start, num_pages);
}

static int optee_shm_unregister_supp(struct tee_context *ctx,
				     struct tee_shm *shm)
{
	return 0;
}

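/*
 * 3. Dynamic shared memory pool based on alloc_pages()
 *
 * Implements an OP-TEE specific shared memory pool which is used
 * when dynamic shared memory is supported by secure world.
 *
 * The main function is optee_shm_pool_alloc_pages().
 */
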
static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm, size_t size)
{
	/*
	 * Shared memory private to the OP-TEE driver doesn't need
	 * to be registered with OP-TEE.
	 */
	if (shm->flags & TEE_SHM_PRIV)
		return optee_pool_op_alloc_helper(poolm, shm, size, NULL);

	return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
}

static void pool_op_free(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
		optee_shm_unregister(shm->ctx, shm);

	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
	shm->kaddr = NULL;
}

static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_poolmgr = pool_op_destroy_poolmgr,
};

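/**
 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used when OP-TEE supports dynamic SHM. In this case
 * command buffers and such are allocated from the kernel's own memory.
 */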
static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
{
	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (!mgr)
		return ERR_PTR(-ENOMEM);

	mgr->ops = &pool_ops;

	return mgr;
}

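/*
 * 4. Do a normal scheduled call into secure world
 *
 * The function optee_smc_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call secure world may request help
 * from normal world using RPCs, Remote Procedure Calls. This includes for
 * instance allocating and freeing shared memory and delivering foreign
 * (non-secure) interrupts back to normal world.
 */
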
static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		optee_rpc_cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		/* Unknown type: report the error instead of overwriting it */
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}
	arg->ret = TEEC_SUCCESS;
}

static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_registered(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the least bits of u.tmem.buf_ptr we store the buffer
		 * offset from the 4k page, as the internal buffer is 4k
		 * page aligned. The pages list is stored after the buffer
		 * offset.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}

static void free_pages_list(struct optee_call_ctx *call_ctx)
{
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
	}
}

static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}

static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm,
				struct optee_call_ctx *call_ctx)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		optee_rpc_cmd(ctx, optee, arg);
	}
}

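/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
 * @call_ctx:	call context, preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */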
static void optee_handle_rpc(struct tee_context *ctx,
			     struct optee_rpc_param *param,
			     struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc(ctx, param->a1,
				    TEE_SHM_MAPPED | TEE_SHM_PRIV);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		kmemleak_not_leak(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing. Since foreign interrupts are handled in Linux,
		 * a dummy RPC is performed to let Linux take the interrupt
		 * through the normal vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}

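/**
 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @arg:	shared memory holding the message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */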
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
				      struct tee_shm *arg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	phys_addr_t parg;
	int rc;

	rc = tee_shm_get_pa(arg, 0, &parg);
	if (rc)
		return rc;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);

	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
				     param.a4, param.a5, param.a6, param.a7,
				     &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a
			 * thread to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			rc = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there's any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return rc;
}

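/*
 * 5. Driver initialization
 *
 * During driver initialization secure world is probed to find out which
 * features it supports so the driver can be initialized with a matching
 * configuration, for instance dynamic shared memory instead of a static
 * carveout.
 */
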
static void optee_get_version(struct tee_device *teedev,
			      struct tee_ioctl_version_data *vers)
{
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP,
	};
	struct optee *optee = tee_get_drvdata(teedev);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
	*vers = v;
}

static int optee_smc_open(struct tee_context *ctx)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	u32 sec_caps = optee->smc.sec_caps;

	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
}

static const struct tee_driver_ops optee_clnt_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

static const struct tee_desc optee_clnt_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_clnt_ops,
	.owner = THIS_MODULE,
};

static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

static const struct optee_ops optee_ops = {
	.do_call_with_arg = optee_smc_do_call_with_arg,
	.to_msg_param = optee_to_msg_param,
	.from_msg_param = optee_from_msg_param,
};

static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
		return true;
	return false;
}

static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_call_get_os_revision_result result;
	} res = {
		.result = {
			.build_id = 0
		}
	};

	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.build_id)
		pr_info("revision %lu.%lu (%08lx)", res.result.major,
			res.result.minor, res.result.build_id);
	else
		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}

static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_calls_revision_result result;
	} res;

	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);

	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
		return true;
	return false;
}

static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
					    u32 *sec_caps)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	} res;
	u32 a1 = 0;

	/*
	 * Tell secure world when the non-secure side is effectively
	 * uniprocessor. Note that this only looks at the compile time
	 * configuration and the number of possible CPUs.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;

	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.status != OPTEE_SMC_RETURN_OK)
		return false;

	*sec_caps = res.result.capabilities;
	return true;
}

static struct tee_shm_pool *optee_config_dyn_shm(void)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return rc;
	}
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
	}

	return rc;
}

static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;
	const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
		pr_err("too small shared memory area\n");
		return ERR_PTR(-EINVAL);
	}

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory ioremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
					    3 /* 8 byte aligned */);
	if (IS_ERR(rc))
		goto err_memunmap;
	priv_mgr = rc;

	vaddr += sz;
	paddr += sz;
	size -= sz;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	*memremaped_shm = va;

	return rc;

err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);
err_memunmap:
	memunmap(va);
	return rc;
}

/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static optee_invoke_fn *get_invoke_func(struct device *dev)
{
	const char *method;

	pr_info("probing for conduit method.\n");

	if (device_property_read_string(dev, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return ERR_PTR(-ENXIO);
	}

	if (!strcmp("hvc", method))
		return optee_smccc_hvc;
	else if (!strcmp("smc", method))
		return optee_smccc_smc;

	pr_warn("invalid \"method\" property: %s\n", method);
	return ERR_PTR(-EINVAL);
}

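/*
 * optee_smc_remove() - Device removal routine
 * @pdev: platform device information struct
 *
 * Called by the platform subsystem to let the driver release the device:
 * the shared memory cache in secure world is drained before the tee
 * devices and remaining resources are freed.
 */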
static int optee_smc_remove(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	optee_disable_shm_cache(optee);

	optee_remove_common(optee);

	if (optee->smc.memremaped_shm)
		memunmap(optee->smc.memremaped_shm);

	kfree(optee);

	return 0;
}

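/*
 * optee_shutdown() - Device shutdown routine
 * @pdev: platform device information struct
 *
 * Called by the platform subsystem on shutdown, reboot or kexec. Disabling
 * the shared memory cache makes sure secure world drops its references to
 * shared memory allocated by this kernel.
 */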
static void optee_shutdown(struct platform_device *pdev)
{
	optee_disable_shm_cache(platform_get_drvdata(pdev));
}

static int optee_probe(struct platform_device *pdev)
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return -EINVAL;
	}

	optee_msg_get_os_revision(invoke_fn);

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return -EINVAL;
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}

	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pool = optee_config_dyn_shm();

	/*
	 * If dynamic shared memory is not available or failed - try static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err;
	}

	optee->ops = &optee_ops;
	optee->smc.invoke_fn = invoke_fn;
	optee->smc.sec_caps = sec_caps;

	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_wait_queue_init(&optee->wait_queue);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	optee->pool = pool;

	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
	 * address during shutdown. This could occur, for example, if we're
	 * kexec booting from an older kernel that did not properly clean up
	 * the shm cache.
	 */
	optee_disable_unmapped_shm_cache(optee);

	optee_enable_shm_cache(optee);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");

	platform_set_drvdata(pdev, optee);

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc) {
		optee_smc_remove(pdev);
		return rc;
	}

	pr_info("initialized driver\n");
	return 0;
err:
	if (optee) {
		/*
		 * tee_device_unregister() is safe to call even if the
		 * devices haven't been registered with
		 * tee_device_register() yet
		 */
		tee_device_unregister(optee->supp_teedev);
		tee_device_unregister(optee->teedev);
		kfree(optee);
	}
	if (pool)
		tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
}

1339
1340static const struct of_device_id optee_dt_match[] = {
1341 { .compatible = "linaro,optee-tz" },
1342 {},
1343};
1344MODULE_DEVICE_TABLE(of, optee_dt_match);
1345
1346static struct platform_driver optee_driver = {
1347 .probe = optee_probe,
1348 .remove = optee_smc_remove,
1349 .shutdown = optee_shutdown,
1350 .driver = {
1351 .name = "optee",
1352 .of_match_table = optee_dt_match,
1353 },
1354};
1355
1356int optee_smc_abi_register(void)
1357{
1358 return platform_driver_register(&optee_driver);
1359}
1360
1361void optee_smc_abi_unregister(void)
1362{
1363 platform_driver_unregister(&optee_driver);
1364}
1365