linux/drivers/gpu/drm/drm_gem_cma_helper.c
/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */
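
/*
 * Example: a minimal sketch (not taken from any particular driver) of how a
 * hypothetical "foo" driver might plug the CMA helpers into its
 * file_operations and struct drm_driver. The foo_* names are assumptions,
 * and the exact set of callbacks a driver fills in depends on the features
 * it supports.
 *
 *      static const struct file_operations foo_fops = {
 *              .owner = THIS_MODULE,
 *              .open = drm_open,
 *              .release = drm_release,
 *              .unlocked_ioctl = drm_ioctl,
 *              .poll = drm_poll,
 *              .read = drm_read,
 *              .mmap = drm_gem_cma_mmap,
 *      };
 *
 *      static struct drm_driver foo_driver = {
 *              .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
 *              .fops = &foo_fops,
 *              .gem_free_object = drm_gem_cma_free_object,
 *              .gem_vm_ops = &drm_gem_cma_vm_ops,
 *              .dumb_create = drm_gem_cma_dumb_create,
 *              .dumb_map_offset = drm_gem_cma_dumb_map_offset,
 *              .dumb_destroy = drm_gem_dumb_destroy,
 *              .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *              .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *              .gem_prime_import = drm_gem_prime_import,
 *              .gem_prime_export = drm_gem_prime_export,
 *              .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
 *              .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
 *              .gem_prime_vmap = drm_gem_cma_prime_vmap,
 *              .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
 *              .gem_prime_mmap = drm_gem_cma_prime_mmap,
 *      };
 */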

/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        if (drm->driver->gem_create_object)
                gem_obj = drm->driver->gem_create_object(drm, size);
        else
                gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
        if (!gem_obj)
                return ERR_PTR(-ENOMEM);
        cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

        ret = drm_gem_object_init(drm, gem_obj, size);
        if (ret)
                goto error;

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret) {
                drm_gem_object_release(gem_obj);
                goto error;
        }

        return cma_obj;

error:
        kfree(cma_obj);
        return ERR_PTR(ret);
}

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store. The backing memory has the writecombine attribute
 * set.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
                                              size_t size)
{
        struct drm_gem_cma_object *cma_obj;
        int ret;

        size = round_up(size, PAGE_SIZE);

        cma_obj = __drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
                                      GFP_KERNEL | __GFP_NOWARN);
        if (!cma_obj->vaddr) {
                dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
                        size);
                ret = -ENOMEM;
                goto error;
        }

        return cma_obj;

error:
        drm_gem_object_unreference_unlocked(&cma_obj->base);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
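
/*
 * Example: a hedged sketch of how a driver might allocate a CMA-backed
 * scanout buffer with drm_gem_cma_create(). The "drm", "pitch" and "height"
 * variables are assumed driver context; note that the helper returns an
 * ERR_PTR() value rather than NULL on failure.
 *
 *      struct drm_gem_cma_object *cma_obj;
 *
 *      cma_obj = drm_gem_cma_create(drm, pitch * height);
 *      if (IS_ERR(cma_obj))
 *              return PTR_ERR(cma_obj);
 *
 * cma_obj->paddr is the DMA address to program into the scanout hardware,
 * while cma_obj->vaddr is the matching kernel mapping of the same memory,
 * with the writecombine attribute set.
 */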

/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
                               struct drm_device *drm, size_t size,
                               uint32_t *handle)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        cma_obj = drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        gem_obj = &cma_obj->base;

        /*
         * Allocate an ID in the file's handle IDR for the object; this ID is
         * the handle that userspace sees.
         */
        ret = drm_gem_handle_create(file_priv, gem_obj, handle);
        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(gem_obj);
        if (ret)
                return ERR_PTR(ret);

        return cma_obj;
}

/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * Drivers using the CMA helpers should set this as their DRM driver's
 * ->gem_free_object() callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_gem_cma_object *cma_obj;

        cma_obj = to_drm_gem_cma_obj(gem_obj);

        if (cma_obj->vaddr) {
                dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
                            cma_obj->vaddr, cma_obj->paddr);
        } else if (gem_obj->import_attach) {
                drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
        }

        drm_gem_object_release(gem_obj);

        kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);

/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as the ->dumb_create() callback in a DRM driver.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
                                     struct drm_device *drm,
                                     struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct drm_gem_cma_object *cma_obj;

        if (args->pitch < min_pitch)
                args->pitch = min_pitch;

        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;

        cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
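
/*
 * Example: a sketch of how a driver with a hardware pitch-alignment
 * constraint (a hypothetical 64-byte requirement here) could wrap
 * drm_gem_cma_dumb_create_internal() as its ->dumb_create() callback. The
 * internal helper then takes care of sizing the buffer to pitch * height.
 *
 *      static int foo_dumb_create(struct drm_file *file_priv,
 *                                 struct drm_device *drm,
 *                                 struct drm_mode_create_dumb *args)
 *      {
 *              unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *
 *              args->pitch = ALIGN(min_pitch, 64);
 *
 *              return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 *      }
 */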

/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer by rounding the line
 * width in bits up to a whole number of bytes. Drivers for hardware that
 * doesn't have any additional restrictions on the pitch can directly use
 * this function as their ->dumb_create() callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
                            struct drm_device *drm,
                            struct drm_mode_create_dumb *args)
{
        struct drm_gem_cma_object *cma_obj;

        args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        args->size = args->pitch * args->height;

        cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);

/**
 * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
 *     object
 * @file_priv: DRM file-private structure containing the GEM object
 * @drm: DRM device
 * @handle: GEM object handle
 * @offset: return location for the fake mmap offset
 *
 * This function looks up an object by its handle and returns the fake mmap
 * offset associated with it. Drivers using the CMA helpers should set this
 * as their DRM driver's ->dumb_map_offset() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
                                struct drm_device *drm, u32 handle,
                                u64 *offset)
{
        struct drm_gem_object *gem_obj;

        gem_obj = drm_gem_object_lookup(file_priv, handle);
        if (!gem_obj) {
                dev_err(drm->dev, "failed to lookup GEM object\n");
                return -EINVAL;
        }

        *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

        drm_gem_object_unreference_unlocked(gem_obj);

        return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
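
/*
 * Example: the offset returned above is consumed by userspace, which passes
 * it as the mmap() offset on the DRM device file descriptor. A rough sketch
 * of the userspace side (using libdrm's drmIoctl(); this is not kernel
 * code, and "fd", "handle" and "size" are assumed to be set up already):
 *
 *      struct drm_mode_map_dumb map = { .handle = handle };
 *      void *ptr;
 *
 *      drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *      ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
 *                 map.offset);
 */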

const struct vm_operations_struct drm_gem_cma_vm_ops = {
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
                                struct vm_area_struct *vma)
{
        int ret;

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
                          cma_obj->paddr, vma->vm_end - vma->vm_start);
        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

/**
 * drm_gem_cma_mmap - memory-map a CMA GEM object
 * @filp: file object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for CMA objects: In addition to the usual GEM VMA setup it
 * immediately faults in the entire object instead of using on-demand
 * faulting. Drivers that employ the CMA helpers should use this function
 * as their ->mmap() handler in the DRM device file's file_operations
 * structure.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        gem_obj = vma->vm_private_data;
        cma_obj = to_drm_gem_cma_obj(gem_obj);

        return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

#ifdef CONFIG_DEBUG_FS
/**
 * drm_gem_cma_describe - describe a CMA GEM object for debugfs
 * @cma_obj: CMA GEM object
 * @m: debugfs file handle
 *
 * This function can be used to dump a human-readable representation of the
 * CMA GEM object into a synthetic file.
 */
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
                          struct seq_file *m)
{
        struct drm_gem_object *obj = &cma_obj->base;
        uint64_t off;

        off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
                        obj->name, obj->refcount.refcount.counter,
                        off, &cma_obj->paddr, cma_obj->vaddr, obj->size);

        seq_printf(m, "\n");
}
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif
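
/*
 * Example: a sketch of a debugfs "show" callback that dumps one
 * driver-tracked CMA GEM object via drm_gem_cma_describe(). How objects are
 * tracked (the foo_private::cursor pointer here) is purely hypothetical and
 * driver-specific; the callback would be registered through
 * drm_debugfs_create_files().
 *
 *      static int foo_debugfs_gem_info(struct seq_file *m, void *data)
 *      {
 *              struct drm_info_node *node = m->private;
 *              struct foo_private *priv = node->minor->dev->dev_private;
 *
 *              drm_gem_cma_describe(priv->cursor, m);
 *
 *              return 0;
 *      }
 */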

/**
 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers using the CMA helpers should
 * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
        struct sg_table *sgt;
        int ret;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
                              cma_obj->paddr, obj->size);
        if (ret < 0)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return NULL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their DRM driver's
 * ->gem_prime_import_sg_table() callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
                                  struct sg_table *sgt)
{
        struct drm_gem_cma_object *cma_obj;

        if (sgt->nents != 1)
                return ERR_PTR(-EINVAL);

        /* Create a CMA GEM buffer. */
        cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
        if (IS_ERR(cma_obj))
                return ERR_CAST(cma_obj);

        cma_obj->paddr = sg_dma_address(sgt->sgl);
        cma_obj->sgt = sgt;

        DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);

        return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer imported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their DRM driver's ->gem_prime_mmap() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
{
        struct drm_gem_cma_object *cma_obj;
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret < 0)
                return ret;

        cma_obj = to_drm_gem_cma_obj(obj);
        return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);

/**
 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @obj: GEM object
 *
 * This function maps a buffer exported via DRM PRIME into the kernel's
 * virtual address space. Since the CMA buffers are already mapped into the
 * kernel virtual address space this simply returns the cached virtual
 * address. Drivers using the CMA helpers should set this as their DRM
 * driver's ->gem_prime_vmap() callback.
 *
 * Returns:
 * The kernel virtual address of the CMA GEM object's backing store.
 */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

        return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);

/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 *     address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their DRM driver's ->gem_prime_vunmap() callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);