linux/drivers/gpu/drm/drm_prime.c
/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/*
 * DMA-BUF/GEM object references and lifetime overview:
 *
 * On export, the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver's .release function.
 *
 * On import, the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to it and stores the
 * attachment in the GEM object. When the imported object is
 * destroyed, the attachment is removed and the reference to the
 * dma_buf is dropped.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from the
 * dma-buf private.
 */
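/*
 * A minimal sketch of how a driver typically wires these helpers into its
 * struct drm_driver; foo_gem_prime_export/foo_gem_prime_import stand in
 * for the driver-specific export/import hooks and are hypothetical names:
 *
 *      static struct drm_driver foo_driver = {
 *              .driver_features    = DRIVER_GEM | DRIVER_PRIME,
 *              .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *              .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *              .gem_prime_export   = foo_gem_prime_export,
 *              .gem_prime_import   = foo_gem_prime_import,
 *              ...
 *      };
 */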

struct drm_prime_member {
        struct list_head entry;
        struct dma_buf *dma_buf;
        uint32_t handle;
};

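/*
 * drm_gem_prime_handle_to_fd - export a GEM handle as a dma-buf fd
 *
 * Looks up the GEM object for @handle and returns a dma-buf file descriptor
 * for it in @prime_fd.  An existing dma-buf is reused if the object was
 * itself imported or has already been exported; otherwise the driver's
 * ->gem_prime_export hook is asked to create one.  For objects exported
 * here, the (dma_buf, handle) pair is also recorded so that importing the
 * same fd back through fd_to_handle returns the original handle.
 */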
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                struct drm_file *file_priv, uint32_t handle, uint32_t flags,
                int *prime_fd)
{
        struct drm_gem_object *obj;
        void *buf;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj)
                return -ENOENT;

        mutex_lock(&file_priv->prime.lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
                get_dma_buf(obj->import_attach->dmabuf);
                *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
                drm_gem_object_unreference_unlocked(obj);
                mutex_unlock(&file_priv->prime.lock);
                return 0;
        }

        if (obj->export_dma_buf) {
                get_dma_buf(obj->export_dma_buf);
                *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
                drm_gem_object_unreference_unlocked(obj);
        } else {
                buf = dev->driver->gem_prime_export(dev, obj, flags);
                if (IS_ERR(buf)) {
                        /* normally the created dma-buf takes ownership of the ref,
                         * but if that fails then drop the ref
                         */
                        drm_gem_object_unreference_unlocked(obj);
                        mutex_unlock(&file_priv->prime.lock);
                        return PTR_ERR(buf);
                }
                obj->export_dma_buf = buf;
                *prime_fd = dma_buf_fd(buf, flags);
        }
        /* if we've exported this buffer then cheat and add it to the import
         * list so we get the correct handle back
         */
        ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
                        obj->export_dma_buf, handle);
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
                mutex_unlock(&file_priv->prime.lock);
                return ret;
        }

        mutex_unlock(&file_priv->prime.lock);
        return 0;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

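/*
 * drm_gem_prime_fd_to_handle - import a dma-buf fd as a GEM handle
 *
 * Returns in @handle an existing handle if this dma-buf has already been
 * imported or exported on @file_priv.  Otherwise the driver's
 * ->gem_prime_import hook creates a GEM object for the buffer, a new handle
 * is created for it, and the (dma_buf, handle) pair is recorded for future
 * lookups.
 */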
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        int ret;

        dma_buf = dma_buf_get(prime_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        mutex_lock(&file_priv->prime.lock);

        ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
                        dma_buf, handle);
        if (!ret)
                goto out_put;

        /* never seen this one, need to import */
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto out_put;
        }

        ret = drm_gem_handle_create(file_priv, obj, handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                goto out_put;

        ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
                        dma_buf, *handle);
        if (ret)
                goto fail;

        mutex_unlock(&file_priv->prime.lock);
        return 0;

fail:
        /* if the driver attached, we are relying on the free-object path
         * to detach, which seems ok
         */
        drm_gem_object_handle_unreference_unlocked(obj);
out_put:
        dma_buf_put(dma_buf);
        mutex_unlock(&file_priv->prime.lock);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

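/*
 * ioctl entry point for PRIME handle-to-fd: validates that the driver
 * supports PRIME and that only DRM_CLOEXEC is passed in the flags, then
 * forwards to the driver's ->prime_handle_to_fd callback.
 */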
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;
        uint32_t flags;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_handle_to_fd)
                return -ENOSYS;

        /* check flags are valid */
        if (args->flags & ~DRM_CLOEXEC)
                return -EINVAL;

        /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
        flags = args->flags & DRM_CLOEXEC;

        return dev->driver->prime_handle_to_fd(dev, file_priv,
                        args->handle, flags, &args->fd);
}

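/*
 * ioctl entry point for PRIME fd-to-handle: checks for PRIME support and
 * forwards to the driver's ->prime_fd_to_handle callback.
 */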
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_fd_to_handle)
                return -ENOSYS;

        return dev->driver->prime_fd_to_handle(dev, file_priv,
                        args->fd, &args->handle);
}

/*
 * drm_prime_pages_to_sg
 *
 * This helper creates an sg table object from a set of pages; the driver
 * is responsible for mapping the pages into the importer's address space.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
        struct sg_table *sg = NULL;
        struct scatterlist *iter;
        int i;
        int ret;

        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg)
                goto out;

        ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
        if (ret)
                goto out;

        for_each_sg(sg->sgl, iter, nr_pages, i)
                sg_set_page(iter, pages[i], PAGE_SIZE, 0);

        return sg;
out:
        kfree(sg);
        return NULL;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/*
 * Export an sg table into an array of pages and addresses.  This is
 * currently required by the TTM driver in order to do correct fault
 * handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_pages)
{
        unsigned count;
        struct scatterlist *sg;
        struct page *page;
        u32 len, offset;
        int pg_index;
        dma_addr_t addr;

        pg_index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                len = sg->length;
                offset = sg->offset;
                page = sg_page(sg);
                addr = sg_dma_address(sg);

                while (len > 0) {
                        if (WARN_ON(pg_index >= max_pages))
                                return -1;
                        pages[pg_index] = page;
                        if (addrs)
                                addrs[pg_index] = addr;

                        page++;
                        addr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        pg_index++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

/* helper function to clean up a GEM/PRIME object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = obj->import_attach;
        if (sg)
                dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        /* remove the reference */
        dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

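/*
 * drm_prime_init_file_private - set up the per-file PRIME bookkeeping:
 * the list of imported/exported dma-bufs and the mutex protecting it.
 */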
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
        INIT_LIST_HEAD(&prime_fpriv->head);
        mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);

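/*
 * drm_prime_destroy_file_private - free all remaining (dma_buf, handle)
 * entries when the DRM file is torn down.
 */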
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
        struct drm_prime_member *member, *safe;

        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
                list_del(&member->entry);
                kfree(member);
        }
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);

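/*
 * drm_prime_add_imported_buf_handle - record a (dma_buf, handle) pair so
 * that later imports of the same dma-buf can be resolved to the existing
 * handle.  The caller must hold prime_fpriv->lock.
 */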
int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
{
        struct drm_prime_member *member;

        member = kmalloc(sizeof(*member), GFP_KERNEL);
        if (!member)
                return -ENOMEM;

        member->dma_buf = dma_buf;
        member->handle = handle;
        list_add(&member->entry, &prime_fpriv->head);
        return 0;
}
EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);

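/*
 * drm_prime_lookup_imported_buf_handle - look up the handle previously
 * recorded for @dma_buf; returns -ENOENT if it has not been seen on this
 * file.  The caller must hold prime_fpriv->lock.
 */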
int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
{
        struct drm_prime_member *member;

        list_for_each_entry(member, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        *handle = member->handle;
                        return 0;
                }
        }
        return -ENOENT;
}
EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);

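/*
 * drm_prime_remove_imported_buf_handle - drop any (dma_buf, handle)
 * entries recorded for @dma_buf.  Unlike the add/lookup helpers, this
 * takes prime_fpriv->lock itself.
 */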
void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
{
        struct drm_prime_member *member, *safe;

        mutex_lock(&prime_fpriv->lock);
        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        list_del(&member->entry);
                        kfree(member);
                }
        }
        mutex_unlock(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);