linux/drivers/tee/tee_shm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
#include "tee_private.h"

static void release_registered_pages(struct tee_shm *shm)
{
        if (shm->pages) {
                if (shm->flags & TEE_SHM_USER_MAPPED) {
                        unpin_user_pages(shm->pages, shm->num_pages);
                } else {
                        size_t n;

                        for (n = 0; n < shm->num_pages; n++)
                                put_page(shm->pages[n]);
                }

                kfree(shm->pages);
        }
}

static void tee_shm_release(struct tee_shm *shm)
{
        struct tee_device *teedev = shm->ctx->teedev;

        if (shm->flags & TEE_SHM_DMA_BUF) {
                mutex_lock(&teedev->mutex);
                idr_remove(&teedev->idr, shm->id);
                mutex_unlock(&teedev->mutex);
        }

        if (shm->flags & TEE_SHM_POOL) {
                struct tee_shm_pool_mgr *poolm;

                if (shm->flags & TEE_SHM_DMA_BUF)
                        poolm = teedev->pool->dma_buf_mgr;
                else
                        poolm = teedev->pool->private_mgr;

                poolm->ops->free(poolm, shm);
        } else if (shm->flags & TEE_SHM_REGISTER) {
                int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

                if (rc)
                        dev_err(teedev->dev.parent,
                                "unregister shm %p failed: %d\n", shm, rc);

                release_registered_pages(shm);
        }

        teedev_ctx_put(shm->ctx);

        kfree(shm);

        tee_device_put(teedev);
}

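/*
 * The dma-buf ops below are mostly stubs: a tee_shm dma-buf exists so the
 * buffer can be referenced and passed around by file descriptor, not so
 * other devices can map it. map_dma_buf therefore hands back no sg_table,
 * and mmap of the dma-buf is served directly from shm->paddr.
 */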
static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
                        *attach, enum dma_data_direction dir)
{
        return NULL;
}

static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
                                     struct sg_table *table,
                                     enum dma_data_direction dir)
{
}

static void tee_shm_op_release(struct dma_buf *dmabuf)
{
        struct tee_shm *shm = dmabuf->priv;

        tee_shm_release(shm);
}

static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct tee_shm *shm = dmabuf->priv;
        size_t size = vma->vm_end - vma->vm_start;

        /* Refuse sharing shared memory provided by application */
        if (shm->flags & TEE_SHM_USER_MAPPED)
                return -EINVAL;

        return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

static const struct dma_buf_ops tee_shm_dma_buf_ops = {
        .map_dma_buf = tee_shm_op_map_dma_buf,
        .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
        .release = tee_shm_op_release,
        .mmap = tee_shm_op_mmap,
};

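/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:        Context that allocates the shared memory
 * @size:       Requested size of shared memory
 * @flags:      Flags setting properties for the requested shared memory
 *
 * Memory is allocated from the pool held by @ctx's tee_device. The @flags
 * field uses the TEE_SHM_* bits from <linux/tee_drv.h>; TEE_SHM_MAPPED must
 * currently always be set. If TEE_SHM_DMA_BUF is set the allocation is
 * assigned an id and exported as a dma-buf, else it is driver private
 * memory.
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */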
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm_pool_mgr *poolm = NULL;
        struct tee_shm *shm;
        void *ret;
        int rc;

        if (!(flags & TEE_SHM_MAPPED)) {
                dev_err(teedev->dev.parent,
                        "only mapped allocations supported\n");
                return ERR_PTR(-EINVAL);
        }

        if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
                dev_err(teedev->dev.parent, "invalid shm flags 0x%x\n", flags);
                return ERR_PTR(-EINVAL);
        }

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->pool) {
                /* teedev has been detached from driver */
                ret = ERR_PTR(-EINVAL);
                goto err_dev_put;
        }

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err_dev_put;
        }

        shm->flags = flags | TEE_SHM_POOL;
        shm->ctx = ctx;
        if (flags & TEE_SHM_DMA_BUF)
                poolm = teedev->pool->dma_buf_mgr;
        else
                poolm = teedev->pool->private_mgr;

        rc = poolm->ops->alloc(poolm, shm, size);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err_kfree;
        }

        if (flags & TEE_SHM_DMA_BUF) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

                mutex_lock(&teedev->mutex);
                shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
                mutex_unlock(&teedev->mutex);
                if (shm->id < 0) {
                        ret = ERR_PTR(shm->id);
                        goto err_pool_free;
                }

                exp_info.ops = &tee_shm_dma_buf_ops;
                exp_info.size = shm->size;
                exp_info.flags = O_RDWR;
                exp_info.priv = shm;

                shm->dmabuf = dma_buf_export(&exp_info);
                if (IS_ERR(shm->dmabuf)) {
                        ret = ERR_CAST(shm->dmabuf);
                        goto err_rem;
                }
        }

        teedev_ctx_get(ctx);

        return shm;
err_rem:
        if (flags & TEE_SHM_DMA_BUF) {
                mutex_lock(&teedev->mutex);
                idr_remove(&teedev->idr, shm->id);
                mutex_unlock(&teedev->mutex);
        }
err_pool_free:
        poolm->ops->free(poolm, shm);
err_kfree:
        kfree(shm);
err_dev_put:
        tee_device_put(teedev);
        return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
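
/*
 * Example (illustrative sketch only, not part of this file): allocating a
 * dma-buf backed shared memory object from a client context and freeing it
 * again. "ctx" is a placeholder for a context obtained from, e.g.,
 * tee_client_open_context().
 *
 *      struct tee_shm *shm;
 *      void *va;
 *
 *      shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 *      if (IS_ERR(shm))
 *              return PTR_ERR(shm);
 *      va = tee_shm_get_va(shm, 0);
 *      ...
 *      tee_shm_free(shm);
 */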

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:        Context that allocates the shared memory
 * @size:       Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
        return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);

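/**
 * tee_shm_register() - Register an existing buffer as shared memory
 * @ctx:        Context that registers the shared memory
 * @addr:       Address of the buffer, in user or kernel space depending
 *              on @flags
 * @length:     Length of the buffer
 * @flags:      TEE_SHM_DMA_BUF together with exactly one of
 *              TEE_SHM_USER_MAPPED or TEE_SHM_KERNEL_MAPPED
 *
 * The pages backing the buffer are pinned and registered with secure world
 * through the driver's shm_register() hook.
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */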
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                                 size_t length, u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
        const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
        const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
        struct tee_shm *shm;
        void *ret;
        int rc;
        int num_pages;
        unsigned long start;

        if (flags != req_user_flags && flags != req_kernel_flags)
                return ERR_PTR(-ENOTSUPP);

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->desc->ops->shm_register ||
            !teedev->desc->ops->shm_unregister) {
                tee_device_put(teedev);
                return ERR_PTR(-ENOTSUPP);
        }

        teedev_ctx_get(ctx);

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        shm->flags = flags | TEE_SHM_REGISTER;
        shm->ctx = ctx;
        shm->id = -1;
        addr = untagged_addr(addr);
        start = rounddown(addr, PAGE_SIZE);
        shm->offset = addr - start;
        shm->size = length;
        num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
        shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
        if (!shm->pages) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        if (flags & TEE_SHM_USER_MAPPED) {
                rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
                                         shm->pages);
        } else {
                struct kvec *kiov;
                int i;

                kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
                if (!kiov) {
                        ret = ERR_PTR(-ENOMEM);
                        goto err;
                }

                for (i = 0; i < num_pages; i++) {
                        kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
                        kiov[i].iov_len = PAGE_SIZE;
                }

                rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
                kfree(kiov);
        }
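        /*
         * rc is the number of pages actually pinned. Record it before the
         * short-pin check so the error path below releases exactly the
         * pages that were taken.
         */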
        if (rc > 0)
                shm->num_pages = rc;
        if (rc != num_pages) {
                if (rc >= 0)
                        rc = -ENOMEM;
                ret = ERR_PTR(rc);
                goto err;
        }

        mutex_lock(&teedev->mutex);
        shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);

        if (shm->id < 0) {
                ret = ERR_PTR(shm->id);
                goto err;
        }

        rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
                                             shm->num_pages, start);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err;
        }

        if (flags & TEE_SHM_DMA_BUF) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

                exp_info.ops = &tee_shm_dma_buf_ops;
                exp_info.size = shm->size;
                exp_info.flags = O_RDWR;
                exp_info.priv = shm;

                shm->dmabuf = dma_buf_export(&exp_info);
                if (IS_ERR(shm->dmabuf)) {
                        ret = ERR_CAST(shm->dmabuf);
                        teedev->desc->ops->shm_unregister(ctx, shm);
                        goto err;
                }
        }

        return shm;
err:
        if (shm) {
                if (shm->id >= 0) {
                        mutex_lock(&teedev->mutex);
                        idr_remove(&teedev->idr, shm->id);
                        mutex_unlock(&teedev->mutex);
                }
                release_registered_pages(shm);
        }
        kfree(shm);
        teedev_ctx_put(ctx);
        tee_device_put(teedev);
        return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);
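
/*
 * Example (illustrative sketch only, not part of this file): registering an
 * existing page-backed kernel buffer so it can be passed to secure world.
 * "ctx", "buf" and "buf_len" are placeholders.
 *
 *      struct tee_shm *shm;
 *
 *      shm = tee_shm_register(ctx, (unsigned long)buf, buf_len,
 *                             TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
 *      if (IS_ERR(shm))
 *              return PTR_ERR(shm);
 *      ...
 *      tee_shm_free(shm);
 */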

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:        Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
        int fd;

        if (!(shm->flags & TEE_SHM_DMA_BUF))
                return -EINVAL;

        get_dma_buf(shm->dmabuf);
        fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(shm->dmabuf);
        return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:        Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
        /*
         * dma_buf_put() decreases the dmabuf reference counter and will
         * call tee_shm_release() when the last reference is gone.
         *
         * In the case of driver private memory we call tee_shm_release()
         * directly instead as it doesn't have a reference counter.
         */
        if (shm->flags & TEE_SHM_DMA_BUF)
                dma_buf_put(shm->dmabuf);
        else
                tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:        Shared memory handle
 * @va:         Virtual address to translate
 * @pa:         Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return -EINVAL;
        /* Check that we're in the range of the shm */
        if ((char *)va < (char *)shm->kaddr)
                return -EINVAL;
        if ((char *)va >= ((char *)shm->kaddr + shm->size))
                return -EINVAL;

        return tee_shm_get_pa(
                        shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:        Shared memory handle
 * @pa:         Physical address to translate
 * @va:         Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return -EINVAL;
        /* Check that we're in the range of the shm */
        if (pa < shm->paddr)
                return -EINVAL;
        if (pa >= (shm->paddr + shm->size))
                return -EINVAL;

        if (va) {
                void *v = tee_shm_get_va(shm, pa - shm->paddr);

                if (IS_ERR(v))
                        return PTR_ERR(v);
                *va = v;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *      the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return ERR_PTR(-EINVAL);
        if (offs >= shm->size)
                return ERR_PTR(-EINVAL);
        return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @pa:         Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *      error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
        if (offs >= shm->size)
                return -EINVAL;
        if (pa)
                *pa = shm->paddr + offs;
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
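
/*
 * Example (illustrative sketch only, not part of this file): looking up the
 * kernel mapping and physical address of an offset inside a mapped tee_shm.
 * "shm" and "offs" are placeholders.
 *
 *      phys_addr_t pa;
 *      void *va = tee_shm_get_va(shm, offs);
 *
 *      if (IS_ERR(va))
 *              return PTR_ERR(va);
 *      if (tee_shm_get_pa(shm, offs, &pa))
 *              return -EINVAL;
 */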

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:        Context owning the shared memory
 * @id:         Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
        struct tee_device *teedev;
        struct tee_shm *shm;

        if (!ctx)
                return ERR_PTR(-EINVAL);

        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
        else if (shm->flags & TEE_SHM_DMA_BUF)
                get_dma_buf(shm->dmabuf);
        mutex_unlock(&teedev->mutex);
        return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
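
/*
 * Example (illustrative sketch only, not part of this file): temporarily
 * taking a reference on a shared memory object by id, then dropping it with
 * tee_shm_put(). "ctx" and "id" are placeholders.
 *
 *      struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *      if (IS_ERR(shm))
 *              return PTR_ERR(shm);
 *      ...
 *      tee_shm_put(shm);
 */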

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:        Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
        if (shm->flags & TEE_SHM_DMA_BUF)
                dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);