linux/drivers/tee/tee_shm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
#include "tee_private.h"

/*
 * Drop the last reference to a shm object: remove it from the id registry,
 * return pool-backed memory to its pool or unpin the pages of registered
 * memory, then release the context and device references taken when the
 * object was created.
 */
static void tee_shm_release(struct tee_shm *shm)
{
        struct tee_device *teedev = shm->ctx->teedev;

        if (shm->flags & TEE_SHM_DMA_BUF) {
                mutex_lock(&teedev->mutex);
                idr_remove(&teedev->idr, shm->id);
                mutex_unlock(&teedev->mutex);
        }

        if (shm->flags & TEE_SHM_POOL) {
                struct tee_shm_pool_mgr *poolm;

                if (shm->flags & TEE_SHM_DMA_BUF)
                        poolm = teedev->pool->dma_buf_mgr;
                else
                        poolm = teedev->pool->private_mgr;

                poolm->ops->free(poolm, shm);
        } else if (shm->flags & TEE_SHM_REGISTER) {
                size_t n;
                int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

                if (rc)
                        dev_err(teedev->dev.parent,
                                "unregister shm %p failed: %d", shm, rc);

                for (n = 0; n < shm->num_pages; n++)
                        put_page(shm->pages[n]);

                kfree(shm->pages);
        }

        teedev_ctx_put(shm->ctx);

        kfree(shm);

        tee_device_put(teedev);
}

/*
 * The dma-buf attached to a shm object is only used for reference counting,
 * mmap() and final release; mapping it for device DMA is not supported, so
 * the map/unmap callbacks are intentionally empty.
 */
static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
                        *attach, enum dma_data_direction dir)
{
        return NULL;
}

static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
                                     struct sg_table *table,
                                     enum dma_data_direction dir)
{
}

static void tee_shm_op_release(struct dma_buf *dmabuf)
{
        struct tee_shm *shm = dmabuf->priv;

        tee_shm_release(shm);
}

static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct tee_shm *shm = dmabuf->priv;
        size_t size = vma->vm_end - vma->vm_start;

        /* Refuse sharing shared memory provided by application */
        if (shm->flags & TEE_SHM_USER_MAPPED)
                return -EINVAL;

        return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

static const struct dma_buf_ops tee_shm_dma_buf_ops = {
        .map_dma_buf = tee_shm_op_map_dma_buf,
        .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
        .release = tee_shm_op_release,
        .mmap = tee_shm_op_mmap,
};

/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:        Context that allocates the shared memory
 * @size:       Requested size of shared memory
 * @flags:      Flags setting properties for the requested shared memory,
 *              TEE_SHM_MAPPED must always be set
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm_pool_mgr *poolm = NULL;
        struct tee_shm *shm;
        void *ret;
        int rc;

        if (!(flags & TEE_SHM_MAPPED)) {
                dev_err(teedev->dev.parent,
                        "only mapped allocations supported\n");
                return ERR_PTR(-EINVAL);
        }

        if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
                dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
                return ERR_PTR(-EINVAL);
        }

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->pool) {
                /* teedev has been detached from driver */
                ret = ERR_PTR(-EINVAL);
                goto err_dev_put;
        }

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err_dev_put;
        }

        shm->flags = flags | TEE_SHM_POOL;
        shm->ctx = ctx;
        if (flags & TEE_SHM_DMA_BUF)
                poolm = teedev->pool->dma_buf_mgr;
        else
                poolm = teedev->pool->private_mgr;

        rc = poolm->ops->alloc(poolm, shm, size);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err_kfree;
        }

        if (flags & TEE_SHM_DMA_BUF) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

                mutex_lock(&teedev->mutex);
                shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
                mutex_unlock(&teedev->mutex);
                if (shm->id < 0) {
                        ret = ERR_PTR(shm->id);
                        goto err_pool_free;
                }

                exp_info.ops = &tee_shm_dma_buf_ops;
                exp_info.size = shm->size;
                exp_info.flags = O_RDWR;
                exp_info.priv = shm;

                shm->dmabuf = dma_buf_export(&exp_info);
                if (IS_ERR(shm->dmabuf)) {
                        ret = ERR_CAST(shm->dmabuf);
                        goto err_rem;
                }
        }

        teedev_ctx_get(ctx);

        return shm;
err_rem:
        if (flags & TEE_SHM_DMA_BUF) {
                mutex_lock(&teedev->mutex);
                idr_remove(&teedev->idr, shm->id);
                mutex_unlock(&teedev->mutex);
        }
err_pool_free:
        poolm->ops->free(poolm, shm);
err_kfree:
        kfree(shm);
err_dev_put:
        tee_device_put(teedev);
        return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
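
/*
 * Example (illustrative sketch, not part of the original driver): a caller
 * that already holds a valid struct tee_context, named "ctx" here for the
 * sake of the example, could allocate, use and free a dma-buf backed
 * shared memory buffer roughly like this. "msg" and "msg_len" are assumed
 * to exist in the caller.
 *
 *      struct tee_shm *shm;
 *      void *va;
 *
 *      shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 *      if (IS_ERR(shm))
 *              return PTR_ERR(shm);
 *
 *      va = tee_shm_get_va(shm, 0);
 *      if (IS_ERR(va)) {
 *              tee_shm_free(shm);
 *              return PTR_ERR(va);
 *      }
 *
 *      memcpy(va, msg, msg_len);       // fill the buffer for the TEE
 *      ...                             // pass shm to the TEE driver
 *      tee_shm_free(shm);
 */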

/**
 * tee_shm_register() - Register memory as shared with the TEE
 * @ctx:        Context that registers the shared memory
 * @addr:       Start address of the buffer, a user space or kernel virtual
 *              address depending on @flags
 * @length:     Length of the buffer in bytes
 * @flags:      TEE_SHM_DMA_BUF combined with either TEE_SHM_USER_MAPPED or
 *              TEE_SHM_KERNEL_MAPPED
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                                 size_t length, u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
        const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
        const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
        struct tee_shm *shm;
        void *ret;
        int rc;
        int num_pages;
        unsigned long start;

        if (flags != req_user_flags && flags != req_kernel_flags)
                return ERR_PTR(-ENOTSUPP);

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->desc->ops->shm_register ||
            !teedev->desc->ops->shm_unregister) {
                tee_device_put(teedev);
                return ERR_PTR(-ENOTSUPP);
        }

        teedev_ctx_get(ctx);

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        shm->flags = flags | TEE_SHM_REGISTER;
        shm->ctx = ctx;
        shm->id = -1;
        addr = untagged_addr(addr);
        start = rounddown(addr, PAGE_SIZE);
        shm->offset = addr - start;
        shm->size = length;
        num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
        shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
        if (!shm->pages) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        if (flags & TEE_SHM_USER_MAPPED) {
                rc = get_user_pages_fast(start, num_pages, FOLL_WRITE,
                                         shm->pages);
        } else {
                struct kvec *kiov;
                int i;

                kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
                if (!kiov) {
                        ret = ERR_PTR(-ENOMEM);
                        goto err;
                }

                for (i = 0; i < num_pages; i++) {
                        kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
                        kiov[i].iov_len = PAGE_SIZE;
                }

                rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
                kfree(kiov);
        }
        if (rc > 0)
                shm->num_pages = rc;
        if (rc != num_pages) {
                if (rc >= 0)
                        rc = -ENOMEM;
                ret = ERR_PTR(rc);
                goto err;
        }

        mutex_lock(&teedev->mutex);
        shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);

        if (shm->id < 0) {
                ret = ERR_PTR(shm->id);
                goto err;
        }

        rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
                                             shm->num_pages, start);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err;
        }

        if (flags & TEE_SHM_DMA_BUF) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

                exp_info.ops = &tee_shm_dma_buf_ops;
                exp_info.size = shm->size;
                exp_info.flags = O_RDWR;
                exp_info.priv = shm;

                shm->dmabuf = dma_buf_export(&exp_info);
                if (IS_ERR(shm->dmabuf)) {
                        ret = ERR_CAST(shm->dmabuf);
                        teedev->desc->ops->shm_unregister(ctx, shm);
                        goto err;
                }
        }

        return shm;
err:
        if (shm) {
                size_t n;

                if (shm->id >= 0) {
                        mutex_lock(&teedev->mutex);
                        idr_remove(&teedev->idr, shm->id);
                        mutex_unlock(&teedev->mutex);
                }
                if (shm->pages) {
                        for (n = 0; n < shm->num_pages; n++)
                                put_page(shm->pages[n]);
                        kfree(shm->pages);
                }
        }
        kfree(shm);
        teedev_ctx_put(ctx);
        tee_device_put(teedev);
        return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);
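
/*
 * Example (illustrative, with assumed variable names): registering an
 * existing kernel buffer "buf" of "len" bytes so it can be passed to the
 * TEE by reference instead of being copied:
 *
 *      struct tee_shm *shm;
 *
 *      shm = tee_shm_register(ctx, (unsigned long)buf, len,
 *                             TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
 *      if (IS_ERR(shm))
 *              return PTR_ERR(shm);
 *      ...
 *      tee_shm_free(shm);
 *
 * User space buffers take the same path with TEE_SHM_USER_MAPPED instead,
 * which is how the TEE_IOC_SHM_REGISTER ioctl ends up here.
 */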

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:        Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
        int fd;

        if (!(shm->flags & TEE_SHM_DMA_BUF))
                return -EINVAL;

        get_dma_buf(shm->dmabuf);
        fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(shm->dmabuf);
        return fd;
}
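
/*
 * Example (illustrative): the TEE_IOC_SHM_ALLOC ioctl path uses this to hand
 * an allocation back to user space as a file descriptor that can be mmap()ed:
 *
 *      int fd = tee_shm_get_fd(shm);
 *
 *      if (fd < 0) {
 *              tee_shm_free(shm);
 *              return fd;
 *      }
 *      // user space: p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *      //                      MAP_SHARED, fd, 0);
 */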

/**
 * tee_shm_free() - Free shared memory
 * @shm:        Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
        /*
         * dma_buf_put() decreases the dmabuf reference counter and will
         * call tee_shm_release() when the last reference is gone.
         *
         * In the case of driver private memory we call tee_shm_release
         * directly instead as it doesn't have a reference counter.
         */
        if (shm->flags & TEE_SHM_DMA_BUF)
                dma_buf_put(shm->dmabuf);
        else
                tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:        Shared memory handle
 * @va:         Virtual address to translate
 * @pa:         Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return -EINVAL;
        /* Check that we're in the range of the shm */
        if ((char *)va < (char *)shm->kaddr)
                return -EINVAL;
        if ((char *)va >= ((char *)shm->kaddr + shm->size))
                return -EINVAL;

        return tee_shm_get_pa(
                        shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:        Shared memory handle
 * @pa:         Physical address to translate
 * @va:         Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return -EINVAL;
        /* Check that we're in the range of the shm */
        if (pa < shm->paddr)
                return -EINVAL;
        if (pa >= (shm->paddr + shm->size))
                return -EINVAL;

        if (va) {
                void *v = tee_shm_get_va(shm, pa - shm->paddr);

                if (IS_ERR(v))
                        return PTR_ERR(v);
                *va = v;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);
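
/*
 * Example (illustrative): translating between the two views of the same
 * mapped buffer, e.g. when the secure world reports a physical address and
 * the driver wants the matching kernel pointer. "msg" stands for any pointer
 * inside the mapped buffer and is an assumption of the example.
 *
 *      phys_addr_t pa;
 *      void *va;
 *
 *      if (tee_shm_va2pa(shm, msg, &pa))       // kernel VA -> PA for the TEE
 *              return -EINVAL;
 *      ...
 *      if (tee_shm_pa2va(shm, pa, &va))        // PA from the TEE -> kernel VA
 *              return -EINVAL;
 *
 * Both helpers reject addresses outside [kaddr, kaddr + size) respectively
 * [paddr, paddr + size).
 */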

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *      the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return ERR_PTR(-EINVAL);
        if (offs >= shm->size)
                return ERR_PTR(-EINVAL);
        return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @pa:         Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *      error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
        if (offs >= shm->size)
                return -EINVAL;
        if (pa)
                *pa = shm->paddr + offs;
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
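
/*
 * Example (illustrative): carving a request and a response area out of one
 * allocation by offset, and fetching the physical address of the response
 * area to hand to the secure world. The offset value is made up for the
 * example.
 *
 *      void *req = tee_shm_get_va(shm, 0);
 *      void *rsp = tee_shm_get_va(shm, 2048);
 *      phys_addr_t rsp_pa;
 *
 *      if (IS_ERR(req) || IS_ERR(rsp))
 *              return -EINVAL;
 *      if (tee_shm_get_pa(shm, 2048, &rsp_pa))
 *              return -EINVAL;
 */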

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:        Context owning the shared memory
 * @id:         Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
        struct tee_device *teedev;
        struct tee_shm *shm;

        if (!ctx)
                return ERR_PTR(-EINVAL);

        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
        else if (shm->flags & TEE_SHM_DMA_BUF)
                get_dma_buf(shm->dmabuf);
        mutex_unlock(&teedev->mutex);
        return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:        Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
        if (shm->flags & TEE_SHM_DMA_BUF)
                dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
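
/*
 * Example (illustrative): resolving a shared memory id received from user
 * space, using the buffer and then dropping the reference again:
 *
 *      struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *      if (IS_ERR(shm))
 *              return PTR_ERR(shm);
 *      ...     // use tee_shm_get_va()/tee_shm_get_pa() on shm
 *      tee_shm_put(shm);
 */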