linux/drivers/gpu/drm/armada/armada_gem.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"

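/*
 * All armada GEM objects are backed by contiguous memory, so a fault
 * can be resolved by offsetting the object's base PFN by the page
 * offset of the faulting address within the VMA.
 */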
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
        struct drm_gem_object *gobj = vmf->vma->vm_private_data;
        struct armada_gem_object *obj = drm_to_armada_gem(gobj);
        unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

        pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
        return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct armada_gem_vm_ops = {
        .fault  = armada_gem_vm_fault,
        .open   = drm_gem_vm_open,
        .close  = drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
        return roundup(size, PAGE_SIZE);
}

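/*
 * Release whichever backing the object has - system pages, a node in
 * the linear (graphics) memory pool, or an imported dma-buf - before
 * freeing the GEM object itself.
 */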
void armada_gem_free_object(struct drm_gem_object *obj)
{
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
        struct armada_private *priv = drm_to_armada_dev(obj->dev);

        DRM_DEBUG_DRIVER("release obj %p\n", dobj);

        drm_gem_free_mmap_offset(&dobj->obj);

        might_lock(&priv->linear_lock);

        if (dobj->page) {
                /* page backed memory */
                unsigned int order = get_order(dobj->obj.size);
                __free_pages(dobj->page, order);
        } else if (dobj->linear) {
                /* linear backed memory */
                mutex_lock(&priv->linear_lock);
                drm_mm_remove_node(dobj->linear);
                mutex_unlock(&priv->linear_lock);
                kfree(dobj->linear);
                if (dobj->addr)
                        iounmap(dobj->addr);
        }

        if (dobj->obj.import_attach) {
                /* We only ever display imported data */
                if (dobj->sgt)
                        dma_buf_unmap_attachment(dobj->obj.import_attach,
                                                 dobj->sgt, DMA_TO_DEVICE);
                drm_prime_gem_destroy(&dobj->obj, NULL);
        }

        drm_gem_object_release(&dobj->obj);

        kfree(dobj);
}

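/*
 * Give the object physically contiguous backing: small objects (such as
 * cursors) are taken from the system page allocator where possible;
 * everything else is carved out of the linear graphics memory pool and
 * zeroed through a temporary write-combining mapping.
 */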
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
        struct armada_private *priv = drm_to_armada_dev(dev);
        size_t size = obj->obj.size;

        if (obj->page || obj->linear)
                return 0;

        /*
         * If it is a small allocation (typically cursor, which will
         * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
         * Framebuffers will never be this small (our minimum size for
         * framebuffers is larger than this anyway.)  Such objects are
         * only accessed by the CPU so we don't need any special handling
         * here.
         */
        if (size <= 8192) {
                unsigned int order = get_order(size);
                struct page *p = alloc_pages(GFP_KERNEL, order);

                if (p) {
                        obj->addr = page_address(p);
                        obj->phys_addr = page_to_phys(p);
                        obj->page = p;

                        memset(obj->addr, 0, PAGE_ALIGN(size));
                }
        }

        /*
         * We could grab something from CMA if it's enabled, but that
         * involves building in a problem:
         *
         * CMA's interface uses dma_alloc_coherent(), which provides us
         * with a CPU virtual address and a device address.
         *
         * The CPU virtual address may be either an address in the kernel
         * direct mapped region (for example, as it would be on x86) or
         * it may be remapped into another part of kernel memory space
         * (eg, as it would be on ARM.)  This means virt_to_phys() on the
         * returned virtual address is invalid depending on the architecture
         * implementation.
         *
         * The device address may also not be a physical address; it may
         * be that there is some kind of remapping between the device and
         * system RAM, which makes the use of the device address also
         * unsafe to re-use as a physical address.
         *
         * This makes DRM usage of dma_alloc_coherent() in a generic way
         * at best very questionable and unsafe.
         */

        /* Otherwise, grab it from our linear allocation */
        if (!obj->page) {
                struct drm_mm_node *node;
                unsigned align = min_t(unsigned, size, SZ_2M);
                void __iomem *ptr;
                int ret;

                node = kzalloc(sizeof(*node), GFP_KERNEL);
                if (!node)
                        return -ENOSPC;

                mutex_lock(&priv->linear_lock);
                ret = drm_mm_insert_node_generic(&priv->linear, node,
                                                 size, align, 0, 0);
                mutex_unlock(&priv->linear_lock);
                if (ret) {
                        kfree(node);
                        return ret;
                }

                obj->linear = node;

                /* Ensure that the memory we're returning is cleared. */
                ptr = ioremap_wc(obj->linear->start, size);
                if (!ptr) {
                        mutex_lock(&priv->linear_lock);
                        drm_mm_remove_node(obj->linear);
                        mutex_unlock(&priv->linear_lock);
                        kfree(obj->linear);
                        obj->linear = NULL;
                        return -ENOMEM;
                }

                memset_io(ptr, 0, size);
                iounmap(ptr);

                obj->phys_addr = obj->linear->start;
                obj->dev_addr = obj->linear->start;
                obj->mapped = true;
        }

        DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
                         (unsigned long long)obj->phys_addr,
                         (unsigned long long)obj->dev_addr);

        return 0;
}

void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
        /* only linear objects need to be ioremap'd */
        if (!dobj->addr && dobj->linear)
                dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
        return dobj->addr;
}

static const struct drm_gem_object_funcs armada_gem_object_funcs = {
        .free = armada_gem_free_object,
        .export = armada_gem_prime_export,
        .vm_ops = &armada_gem_vm_ops,
};

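/*
 * Allocate a GEM object with no shmem backing; the backing store is
 * attached later, either from the linear pool or an imported dma-buf.
 */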
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
        struct armada_gem_object *obj;

        size = roundup_gem_size(size);

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return NULL;

        obj->obj.funcs = &armada_gem_object_funcs;

        drm_gem_private_object_init(dev, &obj->obj, size);

        DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

        return obj;
}

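/*
 * Allocate a shmem-backed GEM object whose pages may come from highmem
 * and are reclaimable.
 */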
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
        size_t size)
{
        struct armada_gem_object *obj;
        struct address_space *mapping;

        size = roundup_gem_size(size);

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return NULL;

        obj->obj.funcs = &armada_gem_object_funcs;

        if (drm_gem_object_init(dev, &obj->obj, size)) {
                kfree(obj);
                return NULL;
        }

        mapping = obj->obj.filp->f_mapping;
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

        DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

        return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
        struct drm_mode_create_dumb *args)
{
        struct armada_gem_object *dobj;
        u32 handle;
        size_t size;
        int ret;

        args->pitch = armada_pitch(args->width, args->bpp);
        args->size = size = args->pitch * args->height;

        dobj = armada_gem_alloc_private_object(dev, size);
        if (dobj == NULL)
                return -ENOMEM;

        ret = armada_gem_linear_back(dev, dobj);
        if (ret)
                goto err;

        ret = drm_gem_handle_create(file, &dobj->obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;

        /* drop reference from allocate - handle holds it now */
        DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
        drm_gem_object_put(&dobj->obj);
        return ret;
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct drm_armada_gem_create *args = data;
        struct armada_gem_object *dobj;
        size_t size;
        u32 handle;
        int ret;

        if (args->size == 0)
                return -ENOMEM;

        size = args->size;

        dobj = armada_gem_alloc_object(dev, size);
        if (dobj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, &dobj->obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;

        /* drop reference from allocate - handle holds it now */
        DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
        drm_gem_object_put(&dobj->obj);
        return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct drm_armada_gem_mmap *args = data;
        struct armada_gem_object *dobj;
        unsigned long addr;

        dobj = armada_gem_object_lookup(file, args->handle);
        if (dobj == NULL)
                return -ENOENT;

        if (!dobj->obj.filp) {
                drm_gem_object_put(&dobj->obj);
                return -EINVAL;
        }

        addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, args->offset);
        drm_gem_object_put(&dobj->obj);
        if (IS_ERR_VALUE(addr))
                return addr;

        args->addr = addr;

        return 0;
}

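/*
 * Copy user data into a kernel-mapped object and, on success, run the
 * object's update callback if one is registered.
 */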
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct drm_armada_gem_pwrite *args = data;
        struct armada_gem_object *dobj;
        char __user *ptr;
        int ret;

        DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
                args->handle, args->offset, args->size, args->ptr);

        if (args->size == 0)
                return 0;

        ptr = (char __user *)(uintptr_t)args->ptr;

        if (!access_ok(ptr, args->size))
                return -EFAULT;

        ret = fault_in_pages_readable(ptr, args->size);
        if (ret)
                return ret;

        dobj = armada_gem_object_lookup(file, args->handle);
        if (dobj == NULL)
                return -ENOENT;

        /* Must be a kernel-mapped object */
        if (!dobj->addr) {
                ret = -EINVAL;
                goto unref;
        }

        if (args->offset > dobj->obj.size ||
            args->size > dobj->obj.size - args->offset) {
                DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
                ret = -EINVAL;
                goto unref;
        }

        if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
                ret = -EFAULT;
        } else if (dobj->update) {
                dobj->update(dobj->update_data);
                ret = 0;
        }

 unref:
        drm_gem_object_put(&dobj->obj);
        return ret;
}

/* Prime support */
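/*
 * Build an sg_table for a dma-buf attachment: shmem-backed objects are
 * mapped page by page, page-backed objects as one contiguous entry, and
 * linear objects just report their device address as there is no
 * struct page behind them.
 */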
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
        enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
        struct scatterlist *sg;
        struct sg_table *sgt;
        int i;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        if (dobj->obj.filp) {
                struct address_space *mapping;
                int count;

                count = dobj->obj.size / PAGE_SIZE;
                if (sg_alloc_table(sgt, count, GFP_KERNEL))
                        goto free_sgt;

                mapping = dobj->obj.filp->f_mapping;

                for_each_sgtable_sg(sgt, sg, i) {
                        struct page *page;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                goto release;

                        sg_set_page(sg, page, PAGE_SIZE, 0);
                }

                if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                        goto release;
        } else if (dobj->page) {
                /* Single contiguous page */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free_sgt;

                sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

                if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                        goto free_table;
        } else if (dobj->linear) {
                /* Single contiguous physical region - no struct page */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free_sgt;
                sg_dma_address(sgt->sgl) = dobj->dev_addr;
                sg_dma_len(sgt->sgl) = dobj->obj.size;
        } else {
                goto free_sgt;
        }
        return sgt;

 release:
        for_each_sgtable_sg(sgt, sg, i)
                if (sg_page(sg))
                        put_page(sg_page(sg));
 free_table:
        sg_free_table(sgt);
 free_sgt:
        kfree(sgt);
        return NULL;
}

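/*
 * Undo armada_gem_prime_map_dma_buf(): DMA-unmap the table unless it
 * describes linear memory (which was never DMA-mapped), drop any shmem
 * page references, then free the table.
 */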
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
        int i;

        if (!dobj->linear)
                dma_unmap_sgtable(attach->dev, sgt, dir, 0);

        if (dobj->obj.filp) {
                struct scatterlist *sg;

                for_each_sgtable_sg(sgt, sg, i)
                        put_page(sg_page(sg));
        }

        sg_free_table(sgt);
        kfree(sgt);
}

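/* mmap of dma-bufs exported by this driver is not supported */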
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
        .map_dma_buf    = armada_gem_prime_map_dma_buf,
        .unmap_dma_buf  = armada_gem_prime_unmap_dma_buf,
        .release        = drm_gem_dmabuf_release,
        .mmap           = armada_gem_dmabuf_mmap,
};

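/* Export a GEM object as a dma-buf using the driver's own dma_buf_ops */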
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &armada_gem_prime_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = obj;

        return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

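/*
 * Import a dma-buf: buffers exported by this device are used directly
 * (taking a reference on the existing GEM object); anything else gets a
 * private GEM object wrapped around the attachment, with the DMA
 * mapping deferred to armada_gem_map_import().
 */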
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
        struct dma_buf_attachment *attach;
        struct armada_gem_object *dobj;

        if (buf->ops == &armada_gem_prime_dmabuf_ops) {
                struct drm_gem_object *obj = buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing our own dmabuf(s) increases the
                         * refcount on the gem object itself.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        dobj = armada_gem_alloc_private_object(dev, buf->size);
        if (!dobj) {
                dma_buf_detach(buf, attach);
                return ERR_PTR(-ENOMEM);
        }

        dobj->obj.import_attach = attach;
        get_dma_buf(buf);

        /*
         * Don't call dma_buf_map_attachment() here - it maps the
         * scatterlist immediately for DMA, and this is not always
         * an appropriate thing to do.
         */
        return &dobj->obj;
}

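/*
 * Map an imported dma-buf for DMA.  The display hardware requires a
 * single contiguous region covering the whole object, so scattered or
 * undersized mappings are rejected.
 */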
int armada_gem_map_import(struct armada_gem_object *dobj)
{
        int ret;

        dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
                                           DMA_TO_DEVICE);
        if (IS_ERR(dobj->sgt)) {
                ret = PTR_ERR(dobj->sgt);
                dobj->sgt = NULL;
                DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
                return ret;
        }
        if (dobj->sgt->nents > 1) {
                DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
                return -EINVAL;
        }
        if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
                DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
                return -EINVAL;
        }
        dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
        dobj->mapped = true;
        return 0;
}
