// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * If EXYNOS_BO_CONTIG, allocate a fully physically contiguous
	 * memory region; otherwise the buffer is only as physically
	 * contiguous as the allocator happens to make it.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
	 * mapping; otherwise use a cacheable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
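	/*
	 * Note: with DMA_ATTR_NO_KERNEL_MAPPING the allocation below gets
	 * no kernel virtual address; dma_alloc_attrs() returns an opaque
	 * cookie that is only meaningful to dma_mmap_attrs(),
	 * dma_get_sgtable_attrs() and dma_free_attrs(). This is also why
	 * the prime vmap helper at the bottom of this file returns NULL.
	 */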

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
			GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);

	return ret;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the id is returned through @handle for userspace to use.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * Do not release the memory region of an imported buffer;
	 * the exporter releases it once the dmabuf's refcount drops
	 * to zero.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}
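
/*
 * Illustrative sketch (not part of the driver): userspace reaches the
 * ioctl above via DRM_IOCTL_EXYNOS_GEM_CREATE, e.g. through libdrm's
 * drmIoctl() wrapper on an already-open DRM fd ("fd" is assumed here):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = 4 * 1024 * 1024,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		req.handle now names the new GEM object.
 */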

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

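/*
 * Note: drm_gem_object_lookup() takes a reference on the object, so a
 * non-NULL return from this helper must be balanced by the caller with
 * drm_gem_object_put_unlocked().
 */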
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put_unlocked(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for a framebuffer.
	 * This callback is invoked by userspace through the
	 * DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
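	/*
	 * Worked example (illustrative values): a 1920x1080 buffer at
	 * 32 bpp gives (32 + 7) / 8 = 4 bytes per pixel, so
	 * pitch = 1920 * 4 = 7680 bytes and
	 * size = 7680 * 1080 = 8294400 bytes (~7.9 MiB); the size is
	 * rounded up to PAGE_SIZE in exynos_drm_gem_create().
	 */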

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

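/*
 * Fault handler installed through the driver's vm_ops: resolve the
 * faulting page from the pages[] array built at allocation/import time
 * and insert it into the vma as a mixed-map PFN.
 */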
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return VM_FAULT_SIGBUS;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cacheable by default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* let drm_gem_mmap() set up the vm_area_struct first. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *),
					   GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* memory is always physically contiguous if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but for
		 * now assume NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of the type of its own buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

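/*
 * No kernel-space mapping is ever set up for these buffers (they are
 * allocated with DMA_ATTR_NO_KERNEL_MAPPING in exynos_drm_alloc_buf()),
 * so vmap is not supported; returning NULL signals the failure to PRIME.
 */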
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}