linux/drivers/gpu/drm/armada/armada_gem.c
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

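/*
 * Fault handler for mmap'd GEM objects: compute the PFN for the faulting
 * address from the object's physical base address and insert it into the
 * VMA directly.
 */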
static int armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

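/*
 * Release an object's backing store (system pages or linear pool node),
 * drop any imported dma-buf mapping, and free the object itself.
 */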
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

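/*
 * Provide physically contiguous backing for an object: small objects
 * (up to 8 KiB, typically cursors) come from the page allocator, anything
 * larger is carved out of the driver's linear memory pool and cleared.
 */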
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may be invalid depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

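/*
 * Allocate a GEM object with no shmem backing store; backing is attached
 * later (e.g. by armada_gem_linear_back() or an imported dma-buf).
 */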
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

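/* Allocate a shmem-backed GEM object. */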
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
	uint32_t handle, uint64_t *offset)
{
	struct armada_gem_object *obj;
	int ret = 0;

	obj = armada_gem_object_lookup(file, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		return -EINVAL;
	}

	/* Don't allow imported objects to be mapped */
	if (obj->obj.import_attach) {
		ret = -EINVAL;
		goto err_unref;
	}

	ret = drm_gem_create_mmap_offset(&obj->obj);
	if (ret == 0) {
		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
	}

 err_unref:
	drm_gem_object_unreference_unlocked(&obj->obj);

	return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_unreference_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

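/* Copy data from userspace into an object that has a kernel mapping. */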
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
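/*
 * Build a scatter-gather table for an exported buffer: shmem-backed objects
 * are pinned page by page and DMA-mapped, page-backed and linear objects are
 * described by a single contiguous entry.
 */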
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map_atomic	= armada_gem_dmabuf_no_kmap,
	.unmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

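/*
 * Map an imported dma-buf for DMA and verify that it provides a single
 * contiguous segment large enough to back the object.
 */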
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}