linux/drivers/tee/optee/call.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"

struct optee_call_waiter {
        struct list_head list_node;
        struct completion c;
};

static void optee_cq_wait_init(struct optee_call_queue *cq,
                               struct optee_call_waiter *w)
{
        /*
         * We're preparing to make a call to secure world. In case we can't
         * allocate a thread in secure world we'll end up waiting in
         * optee_cq_wait_for_completion().
         *
         * Normally if there's no contention in secure world the call will
         * complete and we can clean up directly with optee_cq_wait_final().
         */
        mutex_lock(&cq->mutex);

        /*
         * We add ourselves to the queue, but we don't wait. This
         * guarantees that we don't lose a completion if secure world
         * returns busy and another thread that just exited tries to
         * complete someone.
         */
        init_completion(&w->c);
        list_add_tail(&w->list_node, &cq->waiters);

        mutex_unlock(&cq->mutex);
}

static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
                                         struct optee_call_waiter *w)
{
        wait_for_completion(&w->c);

        mutex_lock(&cq->mutex);

        /* Move to end of list to get out of the way for other waiters */
        list_del(&w->list_node);
        reinit_completion(&w->c);
        list_add_tail(&w->list_node, &cq->waiters);

        mutex_unlock(&cq->mutex);
}

static void optee_cq_complete_one(struct optee_call_queue *cq)
{
        struct optee_call_waiter *w;

        list_for_each_entry(w, &cq->waiters, list_node) {
                if (!completion_done(&w->c)) {
                        complete(&w->c);
                        break;
                }
        }
}

static void optee_cq_wait_final(struct optee_call_queue *cq,
                                struct optee_call_waiter *w)
{
        /*
         * We're done with the call to secure world. The thread in secure
         * world that was used for this call is now available for some
         * other task to use.
         */
        mutex_lock(&cq->mutex);

        /* Get out of the list */
        list_del(&w->list_node);

        /* Wake up one waiting task, if any */
        optee_cq_complete_one(cq);

        /*
         * If our completion is done we've received a completion from
         * another task that was just done with its call to secure world.
         * Since yet another thread is now available in secure world, wake
         * up another waiting task, if any.
         */
        if (completion_done(&w->c))
                optee_cq_complete_one(cq);

        mutex_unlock(&cq->mutex);
}
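
/*
 * A minimal sketch of the three-step pattern the waiter helpers above
 * are used in (see optee_do_call_with_arg() below for the real thing;
 * invoke_secure() is a hypothetical stand-in for optee->invoke_fn):
 *
 *	struct optee_call_waiter w;
 *
 *	optee_cq_wait_init(&cq, &w);
 *	while (invoke_secure() == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
 *		// All secure world threads busy: sleep until another
 *		// caller finishes and completes us, then retry.
 *		optee_cq_wait_for_completion(&cq, &w);
 *	}
 *	// Leave the queue and hand the freed thread to the next waiter.
 *	optee_cq_wait_final(&cq, &w);
 */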

/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
                                          u32 session_id)
{
        struct optee_session *sess;

        list_for_each_entry(sess, &ctxdata->sess_list, list_node)
                if (sess->session_id == session_id)
                        return sess;

        return NULL;
}

/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:        calling context
 * @parg:       physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns the return code from secure world, 0 is OK
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_call_waiter w;
        struct optee_rpc_param param = { };
        struct optee_call_ctx call_ctx = { };
        u32 ret;

        param.a0 = OPTEE_SMC_CALL_WITH_ARG;
        reg_pair_from_64(&param.a1, &param.a2, parg);
        /* Initialize waiter */
        optee_cq_wait_init(&optee->call_queue, &w);
        while (true) {
                struct arm_smccc_res res;

                optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
                                 param.a4, param.a5, param.a6, param.a7,
                                 &res);

                if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
                        /*
                         * Out of threads in secure world, wait for a
                         * thread to become available.
                         */
                        optee_cq_wait_for_completion(&optee->call_queue, &w);
                } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
                        cond_resched();
                        param.a0 = res.a0;
                        param.a1 = res.a1;
                        param.a2 = res.a2;
                        param.a3 = res.a3;
                        optee_handle_rpc(ctx, &param, &call_ctx);
                } else {
                        ret = res.a0;
                        break;
                }
        }

        optee_rpc_finalize_call(&call_ctx);
        /*
         * We're done with our thread in secure world, if there are any
         * waiters queued up, wake one of them.
         */
        optee_cq_wait_final(&optee->call_queue, &w);

        return ret;
}

static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
                                   struct optee_msg_arg **msg_arg,
                                   phys_addr_t *msg_parg)
{
        int rc;
        struct tee_shm *shm;
        struct optee_msg_arg *ma;

        shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
                            TEE_SHM_MAPPED);
        if (IS_ERR(shm))
                return shm;

        ma = tee_shm_get_va(shm, 0);
        if (IS_ERR(ma)) {
                rc = PTR_ERR(ma);
                goto out;
        }

        rc = tee_shm_get_pa(shm, 0, msg_parg);
        if (rc)
                goto out;

        memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
        ma->num_params = num_params;
        *msg_arg = ma;
out:
        if (rc) {
                tee_shm_free(shm);
                return ERR_PTR(rc);
        }

        return shm;
}
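
/*
 * A minimal sketch of how get_msg_arg() and optee_do_call_with_arg()
 * pair up in the callers below, assuming a hypothetical parameter-less
 * command OPTEE_MSG_CMD_FOO:
 *
 *	struct optee_msg_arg *msg_arg;
 *	phys_addr_t msg_parg;
 *	struct tee_shm *shm;
 *
 *	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	msg_arg->cmd = OPTEE_MSG_CMD_FOO;
 *	optee_do_call_with_arg(ctx, msg_parg);	// call by physical address
 *	// ... check msg_arg->ret through the still-mapped virtual address
 *	tee_shm_free(shm);
 */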

int optee_open_session(struct tee_context *ctx,
                       struct tee_ioctl_open_session_arg *arg,
                       struct tee_param *param)
{
        struct optee_context_data *ctxdata = ctx->data;
        int rc;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess = NULL;

        /* +2 for the meta parameters added below */
        shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
        msg_arg->cancel_id = arg->cancel_id;

        /*
         * Initialize and add the meta parameters needed when opening a
         * session.
         */
        msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
                                  OPTEE_MSG_ATTR_META;
        msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
                                  OPTEE_MSG_ATTR_META;
        memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
        msg_arg->params[1].u.value.c = arg->clnt_login;

        rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
                                          arg->clnt_login, arg->clnt_uuid);
        if (rc)
                goto out;

        rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
        if (rc)
                goto out;

        sess = kzalloc(sizeof(*sess), GFP_KERNEL);
        if (!sess) {
                rc = -ENOMEM;
                goto out;
        }

        if (optee_do_call_with_arg(ctx, msg_parg)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        if (msg_arg->ret == TEEC_SUCCESS) {
                /* A new session has been created, add it to the list. */
                sess->session_id = msg_arg->session;
                mutex_lock(&ctxdata->mutex);
                list_add(&sess->list_node, &ctxdata->sess_list);
                mutex_unlock(&ctxdata->mutex);
        } else {
                kfree(sess);
        }

        if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
                arg->ret = TEEC_ERROR_COMMUNICATION;
                arg->ret_origin = TEEC_ORIGIN_COMMS;
                /* Close session again to avoid leakage */
                optee_close_session(ctx, msg_arg->session);
        } else {
                arg->session = msg_arg->session;
                arg->ret = msg_arg->ret;
                arg->ret_origin = msg_arg->ret_origin;
        }
out:
        tee_shm_free(shm);

        return rc;
}

int optee_close_session(struct tee_context *ctx, u32 session)
{
        struct optee_context_data *ctxdata = ctx->data;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess;

        /* Check that the session is valid and remove it from the list */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, session);
        if (sess)
                list_del(&sess->list_node);
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;
        kfree(sess);

        shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
        msg_arg->session = session;
        optee_do_call_with_arg(ctx, msg_parg);

        tee_shm_free(shm);
        return 0;
}

int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
                      struct tee_param *param)
{
        struct optee_context_data *ctxdata = ctx->data;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess;
        int rc;

        /* Check that the session is valid */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, arg->session);
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;

        shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);
        msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
        msg_arg->func = arg->func;
        msg_arg->session = arg->session;
        msg_arg->cancel_id = arg->cancel_id;

        rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
        if (rc)
                goto out;

        if (optee_do_call_with_arg(ctx, msg_parg)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        arg->ret = msg_arg->ret;
        arg->ret_origin = msg_arg->ret_origin;
out:
        tee_shm_free(shm);
        return rc;
}

int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
        struct optee_context_data *ctxdata = ctx->data;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess;

        /* Check that the session is valid */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, session);
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;

        shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
        msg_arg->session = session;
        msg_arg->cancel_id = cancel_id;
        optee_do_call_with_arg(ctx, msg_parg);

        tee_shm_free(shm);
        return 0;
}

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *                            allocations in OP-TEE
 * @optee:      main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
        struct optee_call_waiter w;

        /* We need to retry until secure world isn't busy. */
        optee_cq_wait_init(&optee->call_queue, &w);
        while (true) {
                struct arm_smccc_res res;

                optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
                                 0, &res);
                if (res.a0 == OPTEE_SMC_RETURN_OK)
                        break;
                optee_cq_wait_for_completion(&optee->call_queue, &w);
        }
        optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of some shared memory
 *                             allocations in OP-TEE
 * @optee:      main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
        struct optee_call_waiter w;

        /* We need to retry until secure world isn't busy. */
        optee_cq_wait_init(&optee->call_queue, &w);
        while (true) {
                union {
                        struct arm_smccc_res smccc;
                        struct optee_smc_disable_shm_cache_result result;
                } res;

                optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
                                 0, &res.smccc);
                if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
                        break; /* All shm's freed */
                if (res.result.status == OPTEE_SMC_RETURN_OK) {
                        struct tee_shm *shm;

                        shm = reg_pair_to_ptr(res.result.shm_upper32,
                                              res.result.shm_lower32);
                        tee_shm_free(shm);
                } else {
                        optee_cq_wait_for_completion(&optee->call_queue, &w);
                }
        }
        optee_cq_wait_final(&optee->call_queue, &w);
}

#define PAGELIST_ENTRIES_PER_PAGE                               \
        ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold the list of user page addresses and
 *      links to the next pages of the buffer
 */
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
                           size_t page_offset)
{
        int n = 0;
        phys_addr_t optee_page;
        /*
         * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
         * for details.
         */
        struct {
                u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
                u64 next_page_data;
        } *pages_data;

        /*
         * Currently OP-TEE uses a 4k page size and it does not look
         * like this will change in the future.  On the other hand, there
         * are no known ARM architectures with a page size < 4k.
         * Thus the next build assert looks redundant. But the following
         * code heavily relies on this assumption, so it is better to be
         * safe than sorry.
         */
        BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

        pages_data = (void *)dst;
        /*
         * If the Linux page is bigger than 4k, and the user buffer offset
         * is larger than 4k/8k/12k/etc, this will skip the first 4k pages
         * because they bear no valuable data for OP-TEE.
         */
        optee_page = page_to_phys(*pages) +
                round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

        while (true) {
                pages_data->pages_list[n++] = optee_page;

                if (n == PAGELIST_ENTRIES_PER_PAGE) {
                        pages_data->next_page_data =
                                virt_to_phys(pages_data + 1);
                        pages_data++;
                        n = 0;
                }

                optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
                if (!(optee_page & ~PAGE_MASK)) {
                        if (!--num_pages)
                                break;
                        pages++;
                        optee_page = page_to_phys(*pages);
                }
        }
}
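
/*
 * Illustration of the layout optee_fill_pages_list() produces, assuming
 * the typical OPTEE_MSG_NONCONTIG_PAGE_SIZE of 4096 (so 511 entries fit
 * before each link): a buffer spanning 513 pages is encoded as
 *
 *	dst chunk 0: pa(page 0) .. pa(page 510), pa(dst chunk 1)
 *	dst chunk 1: pa(page 511), pa(page 512), <unused> ...
 *
 * i.e. the final u64 of each 4k chunk of @dst links to the chunk holding
 * the next entries, as described for OPTEE_MSG_ATTR_NONCONTIG.
 */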

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
        int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

        return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}
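
/*
 * Worked example, again assuming 4096-byte non-contiguous pages:
 * PAGELIST_ENTRIES_PER_PAGE = 4096 / 8 - 1 = 511, so a 600-page buffer
 * needs DIV_ROUND_UP(600, 511) = 2 list pages, i.e. 8192 bytes.
 */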

u64 *optee_allocate_pages_list(size_t num_entries)
{
        return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

void optee_free_pages_list(void *list, size_t num_entries)
{
        free_pages_exact(list, get_pages_list_size(num_entries));
}

static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
        return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
                ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
        return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
        while (vma && is_normal_memory(vma->vm_page_prot)) {
                if (vma->vm_end >= end)
                        return 0;
                vma = vma->vm_next;
        }

        return -EINVAL;
}

static int check_mem_type(unsigned long start, size_t num_pages)
{
        struct mm_struct *mm = current->mm;
        int rc;

        /*
         * Allow kernel addresses to be registered with OP-TEE, as kernel
         * pages are configured as normal memory only.
         */
        if (virt_addr_valid(start))
                return 0;

        mmap_read_lock(mm);
        rc = __check_mem_type(find_vma(mm, start),
                              start + num_pages * PAGE_SIZE);
        mmap_read_unlock(mm);

        return rc;
}

int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
                       struct page **pages, size_t num_pages,
                       unsigned long start)
{
        struct tee_shm *shm_arg = NULL;
        struct optee_msg_arg *msg_arg;
        u64 *pages_list;
        phys_addr_t msg_parg;
        int rc;

        if (!num_pages)
                return -EINVAL;

        rc = check_mem_type(start, num_pages);
        if (rc)
                return rc;

        pages_list = optee_allocate_pages_list(num_pages);
        if (!pages_list)
                return -ENOMEM;

        shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
        if (IS_ERR(shm_arg)) {
                rc = PTR_ERR(shm_arg);
                goto out;
        }

        optee_fill_pages_list(pages_list, pages, num_pages,
                              tee_shm_get_page_offset(shm));

        msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
        msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
                                OPTEE_MSG_ATTR_NONCONTIG;
        msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
        msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
        /*
         * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
         * we store the buffer offset from the 4k page, as described in
         * the OP-TEE ABI.
         */
        msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
          (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
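        /*
         * Worked example (hypothetical addresses): with pages_list at
         * physical address 0x80100000 and a page offset of 0x234, the
         * value sent to OP-TEE is 0x80100000 | 0x234 = 0x80100234.
         */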

        if (optee_do_call_with_arg(ctx, msg_parg) ||
            msg_arg->ret != TEEC_SUCCESS)
                rc = -EINVAL;

        tee_shm_free(shm_arg);
out:
        optee_free_pages_list(pages_list, num_pages);
        return rc;
}

int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
        struct tee_shm *shm_arg;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        int rc = 0;

        shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
        if (IS_ERR(shm_arg))
                return PTR_ERR(shm_arg);

        msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

        msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
        msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

        if (optee_do_call_with_arg(ctx, msg_parg) ||
            msg_arg->ret != TEEC_SUCCESS)
                rc = -EINVAL;
        tee_shm_free(shm_arg);
        return rc;
}

int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
                            struct page **pages, size_t num_pages,
                            unsigned long start)
{
        /*
         * We don't want to register supplicant memory in OP-TEE.
         * Instead, information about it will be passed in RPC code.
         */
        return check_mem_type(start, num_pages);
}

int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
{
        return 0;
}