linux/drivers/tee/tee_shm.c
/*
 * Copyright (c) 2015-2016, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"

static void tee_shm_release(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->teedev;

	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	if (shm->ctx)
		list_del(&shm->link);
	mutex_unlock(&teedev->mutex);

	if (shm->flags & TEE_SHM_POOL) {
		struct tee_shm_pool_mgr *poolm;

		if (shm->flags & TEE_SHM_DMA_BUF)
			poolm = teedev->pool->dma_buf_mgr;
		else
			poolm = teedev->pool->private_mgr;

		poolm->ops->free(poolm, shm);
	} else if (shm->flags & TEE_SHM_REGISTER) {
		size_t n;
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d\n", shm, rc);

		for (n = 0; n < shm->num_pages; n++)
			put_page(shm->pages[n]);

		kfree(shm->pages);
	}

	if (shm->ctx)
		teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}
  63
  64static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
  65                        *attach, enum dma_data_direction dir)
  66{
  67        return NULL;
  68}
  69
  70static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
  71                                     struct sg_table *table,
  72                                     enum dma_data_direction dir)
  73{
  74}
  75
  76static void tee_shm_op_release(struct dma_buf *dmabuf)
  77{
  78        struct tee_shm *shm = dmabuf->priv;
  79
  80        tee_shm_release(shm);
  81}
  82
  83static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
  84{
  85        return NULL;
  86}
  87
  88static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
  89{
  90        return NULL;
  91}
  92
  93static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
  94{
  95        struct tee_shm *shm = dmabuf->priv;
  96        size_t size = vma->vm_end - vma->vm_start;
  97
  98        /* Refuse sharing shared memory provided by application */
  99        if (shm->flags & TEE_SHM_REGISTER)
 100                return -EINVAL;
 101
 102        return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
 103                               size, vma->vm_page_prot);
 104}
 105
 106static const struct dma_buf_ops tee_shm_dma_buf_ops = {
 107        .map_dma_buf = tee_shm_op_map_dma_buf,
 108        .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
 109        .release = tee_shm_op_release,
 110        .map_atomic = tee_shm_op_map_atomic,
 111        .map = tee_shm_op_map,
 112        .mmap = tee_shm_op_mmap,
 113};

static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
				       struct tee_device *teedev,
				       size_t size, u32 flags)
{
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (ctx && ctx->teedev != teedev) {
		dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
		return ERR_PTR(-EINVAL);
	}

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if (flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF)) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	shm->flags = flags | TEE_SHM_POOL;
	shm->teedev = teedev;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = teedev->pool->dma_buf_mgr;
	else
		poolm = teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err_pool_free;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			goto err_rem;
		}
	}

	if (ctx) {
		teedev_ctx_get(ctx);
		mutex_lock(&teedev->mutex);
		list_add_tail(&shm->link, &ctx->list_shm);
		mutex_unlock(&teedev->mutex);
	}

	return shm;
err_rem:
	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	mutex_unlock(&teedev->mutex);
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 * @flags:	Flags setting properties for the requested shared memory.
 *
 * Memory allocated as global shared memory is automatically freed when the
 * TEE file pointer is closed. The @flags field uses the bits defined by
 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
 * set. If TEE_SHM_DMA_BUF is set, global shared memory is allocated and
 * associated with a dma-buf handle, otherwise driver private memory is
 * allocated.
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
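
/*
 * Illustrative sketch, not part of the driver: one way a caller might use
 * tee_shm_alloc()/tee_shm_free(). The tee_context "ctx" is assumed to come
 * from the caller's own setup; this is an example under those assumptions,
 * not driver code.
 */
static int __maybe_unused tee_shm_alloc_example(struct tee_context *ctx)
{
	struct tee_shm *shm;
	void *va;

	/* Allocate one page of mapped, dma-buf backed shared memory */
	shm = tee_shm_alloc(ctx, PAGE_SIZE, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* Kernel virtual address of the start of the buffer */
	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		tee_shm_free(shm);
		return PTR_ERR(va);
	}
	memset(va, 0, PAGE_SIZE);

	/* Drops the dma-buf reference; tee_shm_release() runs on last put */
	tee_shm_free(shm);
	return 0;
}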

/**
 * tee_shm_priv_alloc() - Allocate driver private shared memory
 * @teedev:	Device that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Allocates mapped memory from the driver's private pool. The allocation
 * is not associated with a context or a dma-buf handle.
 */
struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
{
	return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
}
EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);

/**
 * tee_shm_register() - Register a userspace buffer as shared memory
 * @ctx:	Context that registers the shared memory
 * @addr:	Userspace address of the buffer
 * @length:	Length of the buffer
 * @flags:	Must be TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED
 *
 * Pins the pages backing the buffer and registers them with the driver.
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
	struct tee_shm *shm;
	void *ret;
	int rc;
	int num_pages;
	unsigned long start;

	if (flags != req_flags)
		return ERR_PTR(-ENOTSUPP);

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		tee_device_put(teedev);
		return ERR_PTR(-ENOTSUPP);
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	shm->flags = flags | TEE_SHM_REGISTER;
	shm->teedev = teedev;
	shm->ctx = ctx;
	shm->id = -1;
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);

	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			teedev->desc->ops->shm_unregister(ctx, shm);
			goto err;
		}
	}

	mutex_lock(&teedev->mutex);
	list_add_tail(&shm->link, &ctx->list_shm);
	mutex_unlock(&teedev->mutex);

	return shm;
err:
	if (shm) {
		size_t n;

		if (shm->id >= 0) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
		if (shm->pages) {
			for (n = 0; n < shm->num_pages; n++)
				put_page(shm->pages[n]);
			kfree(shm->pages);
		}
	}
	kfree(shm);
	teedev_ctx_put(ctx);
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);
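
/*
 * Illustrative sketch, not part of the driver: registering a buffer
 * supplied by userspace, e.g. from an ioctl handler. "uaddr" and "len"
 * are assumed to come from a userspace-provided argument structure.
 */
static int __maybe_unused tee_shm_register_example(struct tee_context *ctx,
						   unsigned long uaddr,
						   size_t len)
{
	struct tee_shm *shm;

	/* Exactly these two flags are accepted; anything else is -ENOTSUPP */
	shm = tee_shm_register(ctx, uaddr, len,
			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... hand shm to the TEE ... */

	/* Registered memory is dma-buf backed, so this drops the reference */
	tee_shm_free(shm);
	return 0;
}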

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd >= 0)
		get_dma_buf(shm->dmabuf);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/*
	 * dma_buf_put() decreases the dmabuf reference counter and will
	 * call tee_shm_release() when the last reference is gone.
	 *
	 * In the case of driver private memory we call tee_shm_release
	 * directly instead as it doesn't have a reference counter.
	 */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
	else
		tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:	Shared memory handle
 * @va:		Virtual address to translate
 * @pa:		Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if ((char *)va < (char *)shm->kaddr)
		return -EINVAL;
	if ((char *)va >= ((char *)shm->kaddr + shm->size))
		return -EINVAL;

	return tee_shm_get_pa(
			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:	Shared memory handle
 * @pa:		Physical address to translate
 * @va:		Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if (pa < shm->paddr)
		return -EINVAL;
	if (pa >= (shm->paddr + shm->size))
		return -EINVAL;

	if (va) {
		void *v = tee_shm_get_va(shm, pa - shm->paddr);

		if (IS_ERR(v))
			return PTR_ERR(v);
		*va = v;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
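
/*
 * Illustrative sketch, not part of the driver: translating between the
 * kernel virtual and physical views of a mapped buffer. "shm" is assumed
 * to be a TEE_SHM_MAPPED allocation owned by the caller.
 */
static int __maybe_unused tee_shm_translate_example(struct tee_shm *shm)
{
	phys_addr_t pa;
	void *va;
	int rc;

	/* Virtual and physical address of the same offset */
	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va))
		return PTR_ERR(va);
	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		return rc;

	/* Round trip: va resolves back to the physical address above */
	return tee_shm_va2pa(shm, va, &pa);
}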

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else if (shm->flags & TEE_SHM_DMA_BUF)
		get_dma_buf(shm->dmabuf);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
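
/*
 * Illustrative sketch, not part of the driver: looking up a dma-buf backed
 * buffer by id and balancing the reference afterwards. The id is assumed
 * to come from userspace, e.g. as part of an ioctl argument.
 */
static int __maybe_unused tee_shm_lookup_example(struct tee_context *ctx,
						 int id)
{
	struct tee_shm *shm;

	shm = tee_shm_get_from_id(ctx, id);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... use shm while holding the reference ... */

	/* Balances the get_dma_buf() taken in tee_shm_get_from_id() */
	tee_shm_put(shm);
	return 0;
}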