linux/drivers/tee/optee/call.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"
#define CREATE_TRACE_POINTS
#include "optee_trace.h"

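/*
 * Each task calling into secure world is queued on
 * optee_call_queue::waiters with one of these while its call is in
 * progress, so that it can be woken when a secure world thread becomes
 * available.
 */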
struct optee_call_waiter {
        struct list_head list_node;
        struct completion c;
};

static void optee_cq_wait_init(struct optee_call_queue *cq,
                               struct optee_call_waiter *w)
{
        /*
         * We're preparing to make a call to secure world. If we can't
         * allocate a thread in secure world we'll end up waiting in
         * optee_cq_wait_for_completion().
         *
         * Normally, if there's no contention in secure world, the call
         * will complete and we can clean up directly with
         * optee_cq_wait_final().
         */
        mutex_lock(&cq->mutex);

        /*
         * We add ourselves to the queue, but we don't wait. This
         * guarantees that we don't lose a completion if secure world
         * returns busy and another thread that just exited tries to
         * complete someone.
         */
        init_completion(&w->c);
        list_add_tail(&w->list_node, &cq->waiters);

        mutex_unlock(&cq->mutex);
}

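/*
 * Called when secure world is busy (typically
 * OPTEE_SMC_RETURN_ETHREAD_LIMIT): sleep until another task completes
 * us, then re-arm the completion and requeue at the tail so retries
 * stay fair to other waiters.
 */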
static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
                                         struct optee_call_waiter *w)
{
        wait_for_completion(&w->c);

        mutex_lock(&cq->mutex);

        /* Move to the end of the list to get out of the way of other waiters */
        list_del(&w->list_node);
        reinit_completion(&w->c);
        list_add_tail(&w->list_node, &cq->waiters);

        mutex_unlock(&cq->mutex);
}

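/* Requires cq->mutex to be held; wakes the first waiter not yet completed */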
static void optee_cq_complete_one(struct optee_call_queue *cq)
{
        struct optee_call_waiter *w;

        list_for_each_entry(w, &cq->waiters, list_node) {
                if (!completion_done(&w->c)) {
                        complete(&w->c);
                        break;
                }
        }
}

static void optee_cq_wait_final(struct optee_call_queue *cq,
                                struct optee_call_waiter *w)
{
        /*
         * We're done with the call to secure world. The thread in secure
         * world that was used for this call is now available for some
         * other task to use.
         */
        mutex_lock(&cq->mutex);

        /* Get out of the list */
        list_del(&w->list_node);

        /* Wake up one waiting task, if any */
        optee_cq_complete_one(cq);

        /*
         * If our completion is done we've been completed by another task
         * that was just done with its call to secure world. Since yet
         * another thread now is available in secure world, wake up
         * another waiting task, if any.
         */
        if (completion_done(&w->c))
                optee_cq_complete_one(cq);

        mutex_unlock(&cq->mutex);
}

/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
                                          u32 session_id)
{
        struct optee_session *sess;

        list_for_each_entry(sess, &ctxdata->sess_list, list_node)
                if (sess->session_id == session_id)
                        return sess;

        return NULL;
}

/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:        calling context
 * @parg:       physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns the return code from secure world; 0 is OK
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_call_waiter w;
        struct optee_rpc_param param = { };
        struct optee_call_ctx call_ctx = { };
        u32 ret;

        param.a0 = OPTEE_SMC_CALL_WITH_ARG;
        reg_pair_from_64(&param.a1, &param.a2, parg);
        /* Initialize waiter */
        optee_cq_wait_init(&optee->call_queue, &w);
        while (true) {
                struct arm_smccc_res res;

                trace_optee_invoke_fn_begin(&param);
                optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
                                 param.a4, param.a5, param.a6, param.a7,
                                 &res);
                trace_optee_invoke_fn_end(&param, &res);

                if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
                        /*
                         * Out of threads in secure world, wait for a
                         * thread to become available.
                         */
                        optee_cq_wait_for_completion(&optee->call_queue, &w);
                } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
                        cond_resched();
                        param.a0 = res.a0;
                        param.a1 = res.a1;
                        param.a2 = res.a2;
                        param.a3 = res.a3;
                        optee_handle_rpc(ctx, &param, &call_ctx);
                } else {
                        ret = res.a0;
                        break;
                }
        }

        optee_rpc_finalize_call(&call_ctx);
        /*
         * We're done with our thread in secure world; if there are any
         * waiting threads, wake one up.
         */
        optee_cq_wait_final(&optee->call_queue, &w);

        return ret;
}

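/*
 * Allocates a zeroed message argument buffer in private shared memory,
 * big enough for @num_params parameters, and returns its virtual
 * address in @msg_arg and its physical address in @msg_parg.
 */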
static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
                                   struct optee_msg_arg **msg_arg,
                                   phys_addr_t *msg_parg)
{
        int rc;
        struct tee_shm *shm;
        struct optee_msg_arg *ma;

        shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
                            TEE_SHM_MAPPED | TEE_SHM_PRIV);
        if (IS_ERR(shm))
                return shm;

        ma = tee_shm_get_va(shm, 0);
        if (IS_ERR(ma)) {
                rc = PTR_ERR(ma);
                goto out;
        }

        rc = tee_shm_get_pa(shm, 0, msg_parg);
        if (rc)
                goto out;

        memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
        ma->num_params = num_params;
        *msg_arg = ma;
out:
        if (rc) {
                tee_shm_free(shm);
                return ERR_PTR(rc);
        }

        return shm;
}

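/**
 * optee_open_session() - open a session to a Trusted Application
 * @ctx:        calling context
 * @arg:        open session arguments
 * @param:      parameters to pass to the Trusted Application
 *
 * The first two message parameters carry the meta data identifying the
 * client (UUID and login method); the remaining ones carry @param.
 *
 * Returns 0 on success or <0 on failure
 */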
int optee_open_session(struct tee_context *ctx,
                       struct tee_ioctl_open_session_arg *arg,
                       struct tee_param *param)
{
        struct optee_context_data *ctxdata = ctx->data;
        int rc;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess = NULL;
        uuid_t client_uuid;

        /* +2 for the meta parameters added below */
        shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
        msg_arg->cancel_id = arg->cancel_id;

        /*
         * Initialize and add the meta parameters needed when opening a
         * session.
         */
        msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
                                  OPTEE_MSG_ATTR_META;
        msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
                                  OPTEE_MSG_ATTR_META;
        memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
        msg_arg->params[1].u.value.c = arg->clnt_login;

        rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
                                          arg->clnt_uuid);
        if (rc)
                goto out;
        export_uuid(msg_arg->params[1].u.octets, &client_uuid);

        rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
        if (rc)
                goto out;

        sess = kzalloc(sizeof(*sess), GFP_KERNEL);
        if (!sess) {
                rc = -ENOMEM;
                goto out;
        }

        if (optee_do_call_with_arg(ctx, msg_parg)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        if (msg_arg->ret == TEEC_SUCCESS) {
                /* A new session has been created, add it to the list. */
                sess->session_id = msg_arg->session;
                mutex_lock(&ctxdata->mutex);
                list_add(&sess->list_node, &ctxdata->sess_list);
                mutex_unlock(&ctxdata->mutex);
        } else {
                kfree(sess);
        }

        if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
                arg->ret = TEEC_ERROR_COMMUNICATION;
                arg->ret_origin = TEEC_ORIGIN_COMMS;
                /* Close session again to avoid leakage */
                optee_close_session(ctx, msg_arg->session);
        } else {
                arg->session = msg_arg->session;
                arg->ret = msg_arg->ret;
                arg->ret_origin = msg_arg->ret_origin;
        }
out:
        tee_shm_free(shm);

        return rc;
}

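/**
 * optee_close_session() - close a session to a Trusted Application
 * @ctx:        calling context
 * @session:    session id
 *
 * Removes the session from the driver's session list before telling
 * secure world to close it.
 *
 * Returns 0 on success or <0 on failure
 */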
int optee_close_session(struct tee_context *ctx, u32 session)
{
        struct optee_context_data *ctxdata = ctx->data;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess;

        /* Check that the session is valid and remove it from the list */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, session);
        if (sess)
                list_del(&sess->list_node);
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;
        kfree(sess);

        shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
        msg_arg->session = session;
        optee_do_call_with_arg(ctx, msg_parg);

        tee_shm_free(shm);
        return 0;
}

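/**
 * optee_invoke_func() - invoke a function in a Trusted Application
 * @ctx:        calling context
 * @arg:        invoke arguments, including the session and function to invoke
 * @param:      parameters to pass to the function
 *
 * Returns 0 on success or <0 on failure
 */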
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
                      struct tee_param *param)
{
        struct optee_context_data *ctxdata = ctx->data;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess;
        int rc;

        /* Check that the session is valid */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, arg->session);
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;

        shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);
        msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
        msg_arg->func = arg->func;
        msg_arg->session = arg->session;
        msg_arg->cancel_id = arg->cancel_id;

        rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
        if (rc)
                goto out;

        if (optee_do_call_with_arg(ctx, msg_parg)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        arg->ret = msg_arg->ret;
        arg->ret_origin = msg_arg->ret_origin;
out:
        tee_shm_free(shm);
        return rc;
}

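/**
 * optee_cancel_req() - request cancellation of an outstanding request
 * @ctx:        calling context
 * @cancel_id:  cancellation id of the request to cancel
 * @session:    session the request belongs to
 *
 * Returns 0 on success or <0 on failure
 */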
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
        struct optee_context_data *ctxdata = ctx->data;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess;

        /* Check that the session is valid */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, session);
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;

        shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
        msg_arg->session = session;
        msg_arg->cancel_id = cancel_id;
        optee_do_call_with_arg(ctx, msg_parg);

        tee_shm_free(shm);
        return 0;
}

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *                            allocations in OP-TEE
 * @optee:      main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
        struct optee_call_waiter w;

        /* We need to retry until secure world isn't busy. */
        optee_cq_wait_init(&optee->call_queue, &w);
        while (true) {
                struct arm_smccc_res res;

                optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
                                 0, &res);
                if (res.a0 == OPTEE_SMC_RETURN_OK)
                        break;
                optee_cq_wait_for_completion(&optee->call_queue, &w);
        }
        optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *                               allocations in OP-TEE
 * @optee:      main service struct
 * @is_mapped:  true if the cached shared memory addresses were mapped by this
 *              kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
        struct optee_call_waiter w;

        /* We need to retry until secure world isn't busy. */
        optee_cq_wait_init(&optee->call_queue, &w);
        while (true) {
                union {
                        struct arm_smccc_res smccc;
                        struct optee_smc_disable_shm_cache_result result;
                } res;

                optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
                                 0, &res.smccc);
                if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
                        break; /* All shm objects freed */
                if (res.result.status == OPTEE_SMC_RETURN_OK) {
                        struct tee_shm *shm;

                        /*
                         * Shared memory references that were not mapped by
                         * this kernel must be ignored to prevent a crash.
                         */
                        if (!is_mapped)
                                continue;

                        shm = reg_pair_to_ptr(res.result.shm_upper32,
                                              res.result.shm_lower32);
                        tee_shm_free(shm);
                } else {
                        optee_cq_wait_for_completion(&optee->call_queue, &w);
                }
        }
        optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 *                             allocations in OP-TEE
 * @optee:      main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
        return __optee_disable_shm_cache(optee, true);
}

/**
 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 *                                      allocations in OP-TEE which are not
 *                                      currently mapped
 * @optee:      main service struct
 */
void optee_disable_unmapped_shm_cache(struct optee *optee)
{
        return __optee_disable_shm_cache(optee, false);
}

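/*
 * A non-contiguous buffer is described to OP-TEE with a "pagelist": a
 * chain of 4k (OPTEE_MSG_NONCONTIG_PAGE_SIZE) pages where each page
 * holds PAGELIST_ENTRIES_PER_PAGE physical page addresses followed by
 * one entry linking to the next pagelist page, see the
 * OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h:
 *
 *   | page addr 0 | ... | page addr 510 | link to next pagelist page |
 */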
#define PAGELIST_ENTRIES_PER_PAGE                               \
        ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/**
 * optee_fill_pages_list() - write a list of user pages to the given shared
 * buffer.
 *
 * @dst: page-aligned buffer where the list of pages will be stored
 * @pages: array of pages that represents the shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold the list of user page addresses and
 *      the links to the next pages of the buffer
 */
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
                           size_t page_offset)
{
        int n = 0;
        phys_addr_t optee_page;
        /*
         * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
         * for details.
         */
        struct {
                u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
                u64 next_page_data;
        } *pages_data;

        /*
         * Currently OP-TEE uses a 4k page size and it doesn't look like
         * this will change in the future. On the other hand, there are
         * no known ARM architectures with a page size < 4k. Thus the
         * build assert below looks redundant. But the following code
         * heavily relies on this assumption, so it is better to be safe
         * than sorry.
         */
        BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

        pages_data = (void *)dst;
        /*
         * If the Linux page size is bigger than 4k and the user buffer
         * offset is larger than 4k/8k/12k/etc., this will skip the first
         * 4k chunks because they bear no data of value for OP-TEE.
         */
        optee_page = page_to_phys(*pages) +
                round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

        while (true) {
                pages_data->pages_list[n++] = optee_page;

                if (n == PAGELIST_ENTRIES_PER_PAGE) {
                        pages_data->next_page_data =
                                virt_to_phys(pages_data + 1);
                        pages_data++;
                        n = 0;
                }

                optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
                if (!(optee_page & ~PAGE_MASK)) {
                        if (!--num_pages)
                                break;
                        pages++;
                        optee_page = page_to_phys(*pages);
                }
        }
}

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
        int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

        return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

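/*
 * Allocates a physically contiguous, page-aligned buffer big enough to
 * hold a pagelist covering @num_entries pages. Freed with
 * optee_free_pages_list().
 */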
u64 *optee_allocate_pages_list(size_t num_entries)
{
        return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

void optee_free_pages_list(void *list, size_t num_entries)
{
        free_pages_exact(list, get_pages_list_size(num_entries));
}

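/*
 * Only normal, cached memory may be shared with secure world; device
 * and non-cached mappings are rejected by the checks below.
 */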
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
        return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
                ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
        return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

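/*
 * Walks the VMAs starting at @vma, checking that the whole range up to
 * @end is backed by normal memory.
 */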
static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
        while (vma && is_normal_memory(vma->vm_page_prot)) {
                if (vma->vm_end >= end)
                        return 0;
                vma = vma->vm_next;
        }

        return -EINVAL;
}

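/*
 * Checks that the range [@start, @start + @num_pages * PAGE_SIZE) is
 * normal memory and thus may be shared with secure world.
 */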
static int check_mem_type(unsigned long start, size_t num_pages)
{
        struct mm_struct *mm = current->mm;
        int rc;

        /*
         * Allow kernel addresses to be registered with OP-TEE, as
         * kernel pages are configured as normal memory only.
         */
        if (virt_addr_valid(start))
                return 0;

        mmap_read_lock(mm);
        rc = __check_mem_type(find_vma(mm, start),
                              start + num_pages * PAGE_SIZE);
        mmap_read_unlock(mm);

        return rc;
}

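/**
 * optee_shm_register() - register shared memory with OP-TEE
 * @ctx:        calling context
 * @shm:        shared memory object to register
 * @pages:      pages backing the shared memory
 * @num_pages:  number of entries in @pages
 * @start:      virtual start address of the buffer
 *
 * The pages are passed to secure world as a non-contiguous pagelist,
 * see optee_fill_pages_list().
 *
 * Returns 0 on success or <0 on failure
 */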
int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
                       struct page **pages, size_t num_pages,
                       unsigned long start)
{
        struct tee_shm *shm_arg = NULL;
        struct optee_msg_arg *msg_arg;
        u64 *pages_list;
        phys_addr_t msg_parg;
        int rc;

        if (!num_pages)
                return -EINVAL;

        rc = check_mem_type(start, num_pages);
        if (rc)
                return rc;

        pages_list = optee_allocate_pages_list(num_pages);
        if (!pages_list)
                return -ENOMEM;

        shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
        if (IS_ERR(shm_arg)) {
                rc = PTR_ERR(shm_arg);
                goto out;
        }

        optee_fill_pages_list(pages_list, pages, num_pages,
                              tee_shm_get_page_offset(shm));

        msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
        msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
                                OPTEE_MSG_ATTR_NONCONTIG;
        msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
        msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
        /*
         * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
         * we store the buffer offset from the 4k page, as described in
         * the OP-TEE ABI.
         */
        msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
          (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

        if (optee_do_call_with_arg(ctx, msg_parg) ||
            msg_arg->ret != TEEC_SUCCESS)
                rc = -EINVAL;

        tee_shm_free(shm_arg);
out:
        optee_free_pages_list(pages_list, num_pages);
        return rc;
}

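/**
 * optee_shm_unregister() - unregister shared memory from OP-TEE
 * @ctx:        calling context
 * @shm:        shared memory object to unregister
 *
 * Returns 0 on success or <0 on failure
 */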
int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
        struct tee_shm *shm_arg;
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        int rc = 0;

        shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
        if (IS_ERR(shm_arg))
                return PTR_ERR(shm_arg);

        msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

        msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
        msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

        if (optee_do_call_with_arg(ctx, msg_parg) ||
            msg_arg->ret != TEEC_SUCCESS)
                rc = -EINVAL;
        tee_shm_free(shm_arg);
        return rc;
}

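/*
 * Supplicant memory isn't registered with OP-TEE; these supplicant
 * variants only validate the memory type.
 */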
int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
                            struct page **pages, size_t num_pages,
                            unsigned long start)
{
        /*
         * We don't want to register supplicant memory in OP-TEE.
         * Instead, information about it will be passed via RPC.
         */
        return check_mem_type(start, num_pages);
}

int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
{
        return 0;
}