linux/drivers/tee/optee/call.c
/*
 * Copyright (c) 2015, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"

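/*
 * An entry in the call queue. Each thread waiting for a free OP-TEE
 * thread parks on one of these until another call returns from secure
 * world and completes it.
 */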
struct optee_call_waiter {
        struct list_head list_node;
        struct completion c;
};

static void optee_cq_wait_init(struct optee_call_queue *cq,
                               struct optee_call_waiter *w)
{
        /*
         * We're preparing to make a call to secure world. In case we can't
         * allocate a thread in secure world we'll end up waiting in
         * optee_cq_wait_for_completion().
         *
         * Normally, if there's no contention in secure world, the call
         * will complete and we can clean up directly with
         * optee_cq_wait_final().
         */
        mutex_lock(&cq->mutex);

        /*
         * We add ourselves to the queue, but we don't wait. This
         * guarantees that we don't lose a completion if secure world
         * returns busy and another thread just exited and tried to
         * complete someone.
         */
        init_completion(&w->c);
        list_add_tail(&w->list_node, &cq->waiters);

        mutex_unlock(&cq->mutex);
}

static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
                                         struct optee_call_waiter *w)
{
        wait_for_completion(&w->c);

        mutex_lock(&cq->mutex);

        /* Move to end of list to get out of the way for other waiters */
        list_del(&w->list_node);
        reinit_completion(&w->c);
        list_add_tail(&w->list_node, &cq->waiters);

        mutex_unlock(&cq->mutex);
}

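/* Requires cq->mutex to be held */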
static void optee_cq_complete_one(struct optee_call_queue *cq)
{
        struct optee_call_waiter *w;

        list_for_each_entry(w, &cq->waiters, list_node) {
                if (!completion_done(&w->c)) {
                        complete(&w->c);
                        break;
                }
        }
}

static void optee_cq_wait_final(struct optee_call_queue *cq,
                                struct optee_call_waiter *w)
{
        /*
         * We're done with the call to secure world. The thread in secure
         * world that was used for this call is now available for some
         * other task to use.
         */
        mutex_lock(&cq->mutex);

        /* Get out of the list */
        list_del(&w->list_node);

        /* Wake up one waiting task, if any */
        optee_cq_complete_one(cq);

        /*
         * If we've been completed, we received a completion from another
         * task that was just done with its call to secure world. Since yet
         * another thread is now available in secure world, wake up one
         * more waiting task, if any.
         */
        if (completion_done(&w->c))
                optee_cq_complete_one(cq);

        mutex_unlock(&cq->mutex);
}

/* Requires ctxdata->mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
                                          u32 session_id)
{
        struct optee_session *sess;

        list_for_each_entry(sess, &ctxdata->sess_list, list_node)
                if (sess->session_id == session_id)
                        return sess;

        return NULL;
}

/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:        calling context
 * @parg:       physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns the return code from secure world, 0 is OK
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_call_waiter w;
        struct optee_rpc_param param = { };
        struct optee_call_ctx call_ctx = { };
        u32 ret;

        param.a0 = OPTEE_SMC_CALL_WITH_ARG;
        reg_pair_from_64(&param.a1, &param.a2, parg);
        /* Initialize waiter */
        optee_cq_wait_init(&optee->call_queue, &w);
        while (true) {
                struct arm_smccc_res res;

                optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
                                 param.a4, param.a5, param.a6, param.a7,
                                 &res);

                if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
                        /*
                         * Out of threads in secure world, wait for a
                         * thread to become available.
                         */
                        optee_cq_wait_for_completion(&optee->call_queue, &w);
                } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
                        param.a0 = res.a0;
                        param.a1 = res.a1;
                        param.a2 = res.a2;
                        param.a3 = res.a3;
                        optee_handle_rpc(ctx, &param, &call_ctx);
                } else {
                        ret = res.a0;
                        break;
                }
        }

        optee_rpc_finalize_call(&call_ctx);
        /*
         * We're done with our thread in secure world. If there are any
         * thread waiters, wake up one.
         */
        optee_cq_wait_final(&optee->call_queue, &w);

        return ret;
}
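
/*
 * A typical caller (see optee_invoke_func() below) drives a call like
 * this, using get_msg_arg() to build the message:
 *
 *      shm = get_msg_arg(ctx, num_params, &msg_arg, &msg_parg);
 *      ...fill in msg_arg->cmd and msg_arg->params...
 *      optee_do_call_with_arg(ctx, msg_parg);
 *      ...read results back out of msg_arg...
 *      tee_shm_free(shm);
 */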
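/*
 * get_msg_arg() - Allocate shared memory holding a struct optee_msg_arg
 *
 * Allocates a zeroed message buffer with room for @num_params parameters
 * and sets its num_params field. On success the kernel virtual address is
 * passed back in @msg_arg and the physical address in @msg_parg.
 */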
static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
                                   struct optee_msg_arg **msg_arg,
                                   phys_addr_t *msg_parg)
{
        int rc;
        struct tee_shm *shm;
        struct optee_msg_arg *ma;

        shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
                            TEE_SHM_MAPPED);
        if (IS_ERR(shm))
                return shm;

        ma = tee_shm_get_va(shm, 0);
        if (IS_ERR(ma)) {
                rc = PTR_ERR(ma);
                goto out;
        }

        rc = tee_shm_get_pa(shm, 0, msg_parg);
        if (rc)
                goto out;

        memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
        ma->num_params = num_params;
        *msg_arg = ma;
out:
        if (rc) {
                tee_shm_free(shm);
                return ERR_PTR(rc);
        }

        return shm;
}

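/*
 * The exported functions below (open/close session, invoke, cancel) are
 * wired up as this driver's tee_driver_ops callbacks.
 */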
int optee_open_session(struct tee_context *ctx,
                       struct tee_ioctl_open_session_arg *arg,
                       struct tee_param *param)
{
        struct optee_context_data *ctxdata = ctx->data;
        int rc;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess = NULL;

        /* +2 for the meta parameters added below */
        shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
        msg_arg->cancel_id = arg->cancel_id;

        /*
         * Initialize and add the meta parameters needed when opening a
         * session.
         */
        msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
                                  OPTEE_MSG_ATTR_META;
        msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
                                  OPTEE_MSG_ATTR_META;
        memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
        memcpy(&msg_arg->params[1].u.value, arg->clnt_uuid,
               sizeof(arg->clnt_uuid));
        msg_arg->params[1].u.value.c = arg->clnt_login;

        rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
        if (rc)
                goto out;

        sess = kzalloc(sizeof(*sess), GFP_KERNEL);
        if (!sess) {
                rc = -ENOMEM;
                goto out;
        }

        if (optee_do_call_with_arg(ctx, msg_parg)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        if (msg_arg->ret == TEEC_SUCCESS) {
                /* A new session has been created, add it to the list. */
                sess->session_id = msg_arg->session;
                mutex_lock(&ctxdata->mutex);
                list_add(&sess->list_node, &ctxdata->sess_list);
                mutex_unlock(&ctxdata->mutex);
        } else {
                kfree(sess);
        }

        if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
                arg->ret = TEEC_ERROR_COMMUNICATION;
                arg->ret_origin = TEEC_ORIGIN_COMMS;
                /* Close the session again to avoid leakage */
                optee_close_session(ctx, msg_arg->session);
        } else {
                arg->session = msg_arg->session;
                arg->ret = msg_arg->ret;
                arg->ret_origin = msg_arg->ret_origin;
        }
out:
        tee_shm_free(shm);

        return rc;
}

int optee_close_session(struct tee_context *ctx, u32 session)
{
        struct optee_context_data *ctxdata = ctx->data;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess;

        /* Check that the session is valid and remove it from the list */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, session);
        if (sess)
                list_del(&sess->list_node);
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;
        kfree(sess);

        shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
        msg_arg->session = session;
        optee_do_call_with_arg(ctx, msg_parg);

        tee_shm_free(shm);
        return 0;
}

int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
                      struct tee_param *param)
{
        struct optee_context_data *ctxdata = ctx->data;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess;
        int rc;

        /* Check that the session is valid */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, arg->session);
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;

        shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);
        msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
        msg_arg->func = arg->func;
        msg_arg->session = arg->session;
        msg_arg->cancel_id = arg->cancel_id;

        rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
        if (rc)
                goto out;

        if (optee_do_call_with_arg(ctx, msg_parg)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        arg->ret = msg_arg->ret;
        arg->ret_origin = msg_arg->ret_origin;
out:
        tee_shm_free(shm);
        return rc;
}

int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
        struct optee_context_data *ctxdata = ctx->data;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess;

        /* Check that the session is valid */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, session);
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;

        shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
        msg_arg->session = session;
        msg_arg->cancel_id = cancel_id;
        optee_do_call_with_arg(ctx, msg_parg);

        tee_shm_free(shm);
        return 0;
}

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *                            allocations in OP-TEE
 * @optee:      main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
        struct optee_call_waiter w;

        /* We need to retry until secure world isn't busy. */
        optee_cq_wait_init(&optee->call_queue, &w);
        while (true) {
                struct arm_smccc_res res;

                optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
                                 0, &res);
                if (res.a0 == OPTEE_SMC_RETURN_OK)
                        break;
                optee_cq_wait_for_completion(&optee->call_queue, &w);
        }
        optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of some shared memory
 *                             allocations in OP-TEE
 * @optee:      main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
        struct optee_call_waiter w;

        /* We need to retry until secure world isn't busy. */
        optee_cq_wait_init(&optee->call_queue, &w);
        while (true) {
                union {
                        struct arm_smccc_res smccc;
                        struct optee_smc_disable_shm_cache_result result;
                } res;

                optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
                                 0, &res.smccc);
                if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
                        break; /* All shms freed */
                if (res.result.status == OPTEE_SMC_RETURN_OK) {
                        struct tee_shm *shm;

                        shm = reg_pair_to_ptr(res.result.shm_upper32,
                                              res.result.shm_lower32);
                        tee_shm_free(shm);
                } else {
                        optee_cq_wait_for_completion(&optee->call_queue, &w);
                }
        }
        optee_cq_wait_final(&optee->call_queue, &w);
}

#define PAGELIST_ENTRIES_PER_PAGE                               \
        ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
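
/*
 * With a 4 KiB OPTEE_MSG_NONCONTIG_PAGE_SIZE this is 511: each pagelist
 * page holds 511 physical page addresses and its final u64 links to the
 * next pagelist page:
 *
 *   [ pa_0 | pa_1 | ... | pa_510 | phys addr of next pagelist page ]
 */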

/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold the list of user page addresses and
 *      links to the next pages of the buffer
 */
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
                           size_t page_offset)
{
        int n = 0;
        phys_addr_t optee_page;
        /*
         * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
         * for details.
         */
        struct {
                u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
                u64 next_page_data;
        } *pages_data;

        /*
         * Currently OP-TEE uses a 4k page size and it does not look like
         * this will change in the future. On the other hand, there are
         * no known ARM architectures with a page size < 4k.
         * Thus the build assert below looks redundant. But the following
         * code heavily relies on this assumption, so it is better to be
         * safe than sorry.
         */
        BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

        pages_data = (void *)dst;
        /*
         * If the Linux page is bigger than 4k and the user buffer offset
         * is larger than 4k/8k/12k/etc., this skips the first 4k chunks
         * of the page, because they carry no data of value to OP-TEE.
         */
        optee_page = page_to_phys(*pages) +
                round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

        while (true) {
                pages_data->pages_list[n++] = optee_page;

                if (n == PAGELIST_ENTRIES_PER_PAGE) {
                        pages_data->next_page_data =
                                virt_to_phys(pages_data + 1);
                        pages_data++;
                        n = 0;
                }

                optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
                if (!(optee_page & ~PAGE_MASK)) {
                        if (!--num_pages)
                                break;
                        pages++;
                        optee_page = page_to_phys(*pages);
                }
        }
}

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
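/*
 * For example, with a 4 KiB OPTEE_MSG_NONCONTIG_PAGE_SIZE (511 entries
 * per pagelist page), 1000 entries need DIV_ROUND_UP(1000, 511) = 2
 * pagelist pages, i.e. 8 KiB.
 */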
static size_t get_pages_list_size(size_t num_entries)
{
        int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

        return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

u64 *optee_allocate_pages_list(size_t num_entries)
{
        return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

void optee_free_pages_list(void *list, size_t num_entries)
{
        free_pages_exact(list, get_pages_list_size(num_entries));
}

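/*
 * Only normal, cached memory can be shared with secure world, so
 * check_mem_type() below rejects mappings of any other memory type.
 */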
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
        return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
#elif defined(CONFIG_ARM64)
        return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

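/*
 * Walks the VMAs from @vma until @end and returns 0 only if all of them
 * map normal memory. Requires mm->mmap_sem to be held, see
 * check_mem_type() below.
 */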
static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
        while (vma && is_normal_memory(vma->vm_page_prot)) {
                if (vma->vm_end >= end)
                        return 0;
                vma = vma->vm_next;
        }

        return -EINVAL;
}

static int check_mem_type(unsigned long start, size_t num_pages)
{
        struct mm_struct *mm = current->mm;
        int rc;

        down_read(&mm->mmap_sem);
        rc = __check_mem_type(find_vma(mm, start),
                              start + num_pages * PAGE_SIZE);
        up_read(&mm->mmap_sem);

        return rc;
}

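/*
 * optee_shm_register() - Register a dynamic shared memory buffer with
 * OP-TEE. The buffer described by @pages is passed as an
 * OPTEE_MSG_ATTR_NONCONTIG page list built by optee_fill_pages_list();
 * @shm itself serves as the cookie identifying the buffer in later calls.
 */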
int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
                       struct page **pages, size_t num_pages,
                       unsigned long start)
{
        struct tee_shm *shm_arg = NULL;
        struct optee_msg_arg *msg_arg;
        u64 *pages_list;
        phys_addr_t msg_parg;
        int rc;

        if (!num_pages)
                return -EINVAL;

        rc = check_mem_type(start, num_pages);
        if (rc)
                return rc;

        pages_list = optee_allocate_pages_list(num_pages);
        if (!pages_list)
                return -ENOMEM;

        shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
        if (IS_ERR(shm_arg)) {
                rc = PTR_ERR(shm_arg);
                goto out;
        }

        optee_fill_pages_list(pages_list, pages, num_pages,
                              tee_shm_get_page_offset(shm));

        msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
        msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
                                OPTEE_MSG_ATTR_NONCONTIG;
        msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
        msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
        /*
         * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
         * we store the buffer offset from the 4k page, as described in the
         * OP-TEE ABI.
         */
        msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
          (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

        if (optee_do_call_with_arg(ctx, msg_parg) ||
            msg_arg->ret != TEEC_SUCCESS)
                rc = -EINVAL;

        tee_shm_free(shm_arg);
out:
        optee_free_pages_list(pages_list, num_pages);
        return rc;
}

int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
        struct tee_shm *shm_arg;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        int rc = 0;

        shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
        if (IS_ERR(shm_arg))
                return PTR_ERR(shm_arg);

        msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

        msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
        msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

        if (optee_do_call_with_arg(ctx, msg_parg) ||
            msg_arg->ret != TEEC_SUCCESS)
                rc = -EINVAL;
        tee_shm_free(shm_arg);
        return rc;
}

int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
                            struct page **pages, size_t num_pages,
                            unsigned long start)
{
        /*
         * We don't want to register supplicant memory in OP-TEE.
         * Instead, information about it is passed via RPC.
         */
        return check_mem_type(start, num_pages);
}

int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
{
        return 0;
}