linux/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

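/*
 * Reserve an IOVA range for the object from the driver's drm_mm allocator,
 * then map the object's backing sg_table into the IOMMU domain at that
 * address. On success, rk_obj->dma_addr holds the device-visible address.
 */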
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        struct rockchip_drm_private *private = drm->dev_private;
        int prot = IOMMU_READ | IOMMU_WRITE;
        ssize_t ret;

        mutex_lock(&private->mm_lock);
        ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
                                         rk_obj->base.size, PAGE_SIZE,
                                         0, 0);
        mutex_unlock(&private->mm_lock);

        if (ret < 0) {
                DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
                return ret;
        }

        rk_obj->dma_addr = rk_obj->mm.start;

        ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
                                prot);
        /* Cast so a negative errno is not promoted to a huge size_t. */
        if (ret < (ssize_t)rk_obj->base.size) {
                DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
                          ret, rk_obj->base.size);
                ret = -ENOMEM;
                goto err_remove_node;
        }

        rk_obj->size = ret;

        return 0;

err_remove_node:
        mutex_lock(&private->mm_lock);
        drm_mm_remove_node(&rk_obj->mm);
        mutex_unlock(&private->mm_lock);

        return ret;
}

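/*
 * Tear down the IOMMU mapping set up by rockchip_gem_iommu_map() and
 * return the IOVA range to the drm_mm allocator.
 */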
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        struct rockchip_drm_private *private = drm->dev_private;

        iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

        mutex_lock(&private->mm_lock);

        drm_mm_remove_node(&rk_obj->mm);

        mutex_unlock(&private->mm_lock);

        return 0;
}

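/*
 * Allocate shmem-backed pages for the object and build an sg_table
 * describing them, then flush the pages so the device sees the CPU's
 * writes before the buffer is first handed to hardware.
 */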
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        int ret, i;
        struct scatterlist *s;

        rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
        if (IS_ERR(rk_obj->pages))
                return PTR_ERR(rk_obj->pages);

        rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

        rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
                                            rk_obj->pages, rk_obj->num_pages);
        if (IS_ERR(rk_obj->sgt)) {
                ret = PTR_ERR(rk_obj->sgt);
                goto err_put_pages;
        }

        /*
         * Fake up the SG table so that dma_sync_sg_for_device() can be used
         * to flush the pages associated with it.
         *
         * TODO: Replace this by drm_clflush_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
        for_each_sgtable_sg(rk_obj->sgt, s, i)
                sg_dma_address(s) = sg_phys(s);

        dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

        return 0;

err_put_pages:
        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
        return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
        sg_free_table(rk_obj->sgt);
        kfree(rk_obj->sgt);
        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

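/*
 * IOMMU-backed allocation: get discontiguous pages, map them so the
 * device sees one contiguous IOVA range, and optionally create a
 * write-combined kernel mapping when alloc_kmap is set.
 */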
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
                                    bool alloc_kmap)
{
        int ret;

        ret = rockchip_gem_get_pages(rk_obj);
        if (ret < 0)
                return ret;

        ret = rockchip_gem_iommu_map(rk_obj);
        if (ret < 0)
                goto err_free;

        if (alloc_kmap) {
                rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
                                      pgprot_writecombine(PAGE_KERNEL));
                if (!rk_obj->kvaddr) {
                        DRM_ERROR("failed to vmap() buffer\n");
                        ret = -ENOMEM;
                        goto err_unmap;
                }
        }

        return 0;

err_unmap:
        rockchip_gem_iommu_unmap(rk_obj);
err_free:
        rockchip_gem_put_pages(rk_obj);

        return ret;
}

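/*
 * Contiguous allocation through the DMA mapping API, used when no IOMMU
 * is available. DMA_ATTR_NO_KERNEL_MAPPING avoids wasting kernel address
 * space when the caller does not need a kernel mapping.
 */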
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
                                  bool alloc_kmap)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;

        rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

        if (!alloc_kmap)
                rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

        rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
                                         &rk_obj->dma_addr, GFP_KERNEL,
                                         rk_obj->dma_attrs);
        if (!rk_obj->kvaddr) {
                DRM_ERROR("failed to allocate %zu byte dma buffer\n", obj->size);
                return -ENOMEM;
        }

        return 0;
}

static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
                                  bool alloc_kmap)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;
        struct rockchip_drm_private *private = drm->dev_private;

        if (private->domain)
                return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
        else
                return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
        vunmap(rk_obj->kvaddr);
        rockchip_gem_iommu_unmap(rk_obj);
        rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;

        dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
                       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
        if (rk_obj->pages)
                rockchip_gem_free_iommu(rk_obj);
        else
                rockchip_gem_free_dma(rk_obj);
}

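/*
 * Map the object's individual pages into userspace; used for IOMMU-backed
 * buffers, which are not physically contiguous.
 */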
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
                                              struct vm_area_struct *vma)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        unsigned int count = obj->size >> PAGE_SHIFT;
        unsigned long user_count = vma_pages(vma);

        if (user_count == 0)
                return -ENXIO;

        return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
                                            struct vm_area_struct *vma)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;

        return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
                              obj->size, rk_obj->dma_attrs);
}

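/*
 * Common mmap tail shared by rockchip_gem_mmap_buf() and
 * rockchip_gem_mmap(): dispatch to the IOMMU or DMA variant depending on
 * how the buffer was allocated.
 */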
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma)
{
        int ret;
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        /*
         * We allocated a struct page table for rk_obj, so clear
         * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
         */
        vma->vm_flags &= ~VM_PFNMAP;

        if (rk_obj->pages)
                ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
        else
                ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
                          struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret)
                return ret;

        return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        /*
         * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
         * whole buffer from the start.
         */
        vma->vm_pgoff = 0;

        obj = vma->vm_private_data;

        return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
        drm_gem_object_release(&rk_obj->base);
        kfree(rk_obj);
}

static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
        .free = rockchip_gem_free_object,
        .get_sg_table = rockchip_gem_prime_get_sg_table,
        .vmap = rockchip_gem_prime_vmap,
        .vunmap = rockchip_gem_prime_vunmap,
        .vm_ops = &drm_gem_cma_vm_ops,
};

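/*
 * Allocate and initialize the GEM object itself; backing storage is
 * attached separately via rockchip_gem_alloc_buf().
 */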
static struct rockchip_gem_object *
        rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
        struct rockchip_gem_object *rk_obj;
        struct drm_gem_object *obj;

        size = round_up(size, PAGE_SIZE);

        rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
        if (!rk_obj)
                return ERR_PTR(-ENOMEM);

        obj = &rk_obj->base;

        obj->funcs = &rockchip_gem_object_funcs;

        drm_gem_object_init(drm, obj, size);

        return rk_obj;
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
                           bool alloc_kmap)
{
        struct rockchip_gem_object *rk_obj;
        int ret;

        rk_obj = rockchip_gem_alloc_object(drm, size);
        if (IS_ERR(rk_obj))
                return rk_obj;

        ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
        if (ret)
                goto err_free_rk_obj;

        return rk_obj;

err_free_rk_obj:
        rockchip_gem_release_object(rk_obj);
        return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *drm = obj->dev;
        struct rockchip_drm_private *private = drm->dev_private;
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (obj->import_attach) {
                if (private->domain) {
                        rockchip_gem_iommu_unmap(rk_obj);
                } else {
                        dma_unmap_sgtable(drm->dev, rk_obj->sgt,
                                          DMA_BIDIRECTIONAL, 0);
                }
                drm_prime_gem_destroy(obj, rk_obj->sgt);
        } else {
                rockchip_gem_free_buf(rk_obj);
        }

        rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct rockchip_gem_object * on success or an ERR_PTR()
 * value on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
                                struct drm_device *drm, unsigned int size,
                                unsigned int *handle)
{
        struct rockchip_gem_object *rk_obj;
        struct drm_gem_object *obj;
        int ret;

        rk_obj = rockchip_gem_create_object(drm, size, false);
        if (IS_ERR(rk_obj))
                return ERR_CAST(rk_obj);

        obj = &rk_obj->base;

        /*
         * Allocate an id in the idr table where the obj is registered;
         * the handle holds the id that userspace can see.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                goto err_handle_create;

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_put(obj);

        return rk_obj;

err_handle_create:
        rockchip_gem_free_object(obj);

        return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
                             struct drm_device *dev,
                             struct drm_mode_create_dumb *args)
{
        struct rockchip_gem_object *rk_obj;
        int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

        /*
         * Align to 64 bytes since Mali requires it.
         */
        args->pitch = ALIGN(min_pitch, 64);
        args->size = args->pitch * args->height;

        rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
                                                 &args->handle);

        return PTR_ERR_OR_ZERO(rk_obj);
}

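/*
 * Illustrative userspace sketch (not part of this driver): a KMS client
 * would typically exercise the dumb-buffer path above roughly like this,
 * using the standard DRM UAPI; error handling omitted for brevity.
 *
 *        struct drm_mode_create_dumb create = {
 *                .width = 1920, .height = 1080, .bpp = 32,
 *        };
 *        struct drm_mode_map_dumb map = {};
 *        void *pixels;
 *
 *        ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *        map.handle = create.handle;
 *        ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *        pixels = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, drm_fd, map.offset);
 */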
/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;
        struct sg_table *sgt;
        int ret;

        if (rk_obj->pages)
                return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
                                    rk_obj->dma_addr, obj->size,
                                    rk_obj->dma_attrs);
        if (ret) {
                DRM_ERROR("failed to allocate sgt, %d\n", ret);
                kfree(sgt);
                return ERR_PTR(ret);
        }

        return sgt;
}

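/*
 * Helpers for the PRIME import path below: adopt an imported sg_table and
 * make it visible to the device, either through the IOMMU or through the
 * DMA mapping API. The DMA variant additionally requires the buffer to be
 * contiguous in DMA address space.
 */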
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
                          struct dma_buf_attachment *attach,
                          struct sg_table *sg,
                          struct rockchip_gem_object *rk_obj)
{
        rk_obj->sgt = sg;
        return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
                        struct dma_buf_attachment *attach,
                        struct sg_table *sg,
                        struct rockchip_gem_object *rk_obj)
{
        int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);

        if (err)
                return err;

        if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
                DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
                dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
                return -EINVAL;
        }

        rk_obj->dma_addr = sg_dma_address(sg->sgl);
        rk_obj->sgt = sg;
        return 0;
}

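/*
 * Import a dma-buf's sg_table and wrap it in a rockchip_gem_object; this
 * backs the driver's gem_prime_import_sg_table hook.
 */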
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
                                   struct dma_buf_attachment *attach,
                                   struct sg_table *sg)
{
        struct rockchip_drm_private *private = drm->dev_private;
        struct rockchip_gem_object *rk_obj;
        int ret;

        rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
        if (IS_ERR(rk_obj))
                return ERR_CAST(rk_obj);

        if (private->domain)
                ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
        else
                ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

        if (ret < 0) {
                DRM_ERROR("failed to import sg table: %d\n", ret);
                goto err_free_rk_obj;
        }

        return &rk_obj->base;

err_free_rk_obj:
        rockchip_gem_release_object(rk_obj);
        return ERR_PTR(ret);
}

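/*
 * Kernel virtual mapping helpers used for dma-buf vmap: IOMMU-backed
 * objects are vmap()ed on demand, while DMA-backed objects reuse the
 * kernel address obtained at allocation time, if one exists.
 */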
int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (rk_obj->pages) {
                void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
                                   pgprot_writecombine(PAGE_KERNEL));
                if (!vaddr)
                        return -ENOMEM;
                dma_buf_map_set_vaddr(map, vaddr);
                return 0;
        }

        if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                return -ENOMEM;
        dma_buf_map_set_vaddr(map, rk_obj->kvaddr);

        return 0;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (rk_obj->pages) {
                vunmap(map->vaddr);
                return;
        }

        /* Nothing to do if allocated by DMA mapping API. */
}