linux/drivers/gpu/drm/radeon/radeon_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

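/*
 * GEM objects are only ever created through radeon_gem_object_create()
 * below, which allocates the backing radeon_bo itself, so this init
 * hook should never be reached.
 */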
int radeon_gem_object_init(struct drm_gem_object *obj)
{
        BUG();

        return 0;
}

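/*
 * Drop the final reference on the backing bo, tearing down the PRIME
 * attachment first if the object was imported.
 */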
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                radeon_bo_unref(&robj);
        }
}

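/*
 * Allocate a bo with the requested size, alignment and initial domain,
 * wrap it in a GEM object and track it on the device's object list.
 * A failed VRAM allocation is retried once with GTT added to the domain.
 */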
int radeon_gem_object_create(struct radeon_device *rdev, int size,
                                int alignment, int initial_domain,
                                bool discardable, bool kernel,
                                struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        /* maximum bo size is the minimum of visible VRAM and GTT size */
        max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
        if (size > max_size) {
                printk(KERN_WARNING "%s:%d alloc size %dMB bigger than %ldMB limit\n",
                       __func__, __LINE__, size >> 20, max_size >> 20);
                return -ENOMEM;
        }

retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
                                initial_domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;
        robj->pid = task_pid_nr(current);

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        return 0;
}

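/*
 * Validate the object into the domain implied by the read/write domain
 * arguments; currently only the CPU domain is handled, by waiting for
 * the bo to go idle.
 */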
int radeon_gem_set_domain(struct drm_gem_object *gobj,
                          uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        int r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                printk(KERN_WARNING "Set domain without domain!\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access, wait for the object to go idle */
                r = radeon_bo_wait(robj, NULL, false);
                if (r) {
                        printk(KERN_ERR "Failed to wait for object!\n");
                        return r;
                }
        }
        return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new
 * and the open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (rdev->family < CHIP_CAYMAN) {
                return 0;
        }

        r = radeon_bo_reserve(rbo, false);
        if (r) {
                return r;
        }

        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        radeon_bo_unreserve(rbo);

        return 0;
}

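/*
 * Counterpart of radeon_gem_object_open(): drop the per-vm reference
 * and remove the va mapping once the last reference goes away.
 */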
void radeon_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (rdev->family < CHIP_CAYMAN) {
                return;
        }

        r = radeon_bo_reserve(rbo, true);
        if (r) {
                dev_err(rdev->dev, "leaking bo va because "
                        "we failed to reserve the bo (%d)\n", r);
                return;
        }
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        radeon_vm_bo_rmv(rdev, bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}

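/*
 * -EDEADLK means the GPU is locked up; try a reset and, if it worked,
 * ask the caller to retry with -EAGAIN.
 */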
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */
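/* Report the VRAM and GART sizes to userspace, minus kernel reservations. */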
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;
        unsigned i;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];

        args->vram_size = rdev->mc.real_vram_size;
        args->vram_visible = (u64)man->size << PAGE_SHIFT;
        if (rdev->stollen_vga_memory)
                args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
        args->vram_visible -= radeon_fbdev_total_size(rdev);
        args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                args->gart_size -= rdev->ring[i].ring_size;
        return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

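/* Allocate a new bo and return a handle to it. */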
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                        args->initial_domain, false,
                                        false, &gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;
}

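/* Move a bo into the domain requested by userspace. */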
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
        down_read(&rdev->exclusive_lock);

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                up_read(&rdev->exclusive_lock);
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        drm_gem_object_unreference_unlocked(gobj);
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
}

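/* Look up a handle and return the fake mmap offset of its bo. */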
int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

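/*
 * Non-blocking idle check; also reports the bo's current placement
 * back to userspace.
 */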
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, true);
        switch (cur_placement) {
        case TTM_PL_VRAM:
                args->domain = RADEON_GEM_DOMAIN_VRAM;
                break;
        case TTM_PL_TT:
                args->domain = RADEON_GEM_DOMAIN_GTT;
                break;
        case TTM_PL_SYSTEM:
                args->domain = RADEON_GEM_DOMAIN_CPU;
                break;
        default:
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

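/* Block until the bo is idle. */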
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* call back hw-specific functions, if any */
        if (rdev->asic->ioctl_wait_idle)
                rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

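/* Apply the tiling flags and pitch requested by userspace to a bo. */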
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

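/* Read back the tiling flags and pitch of a bo. */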
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        DRM_DEBUG("\n");
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

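/*
 * Map or unmap a bo in the per-file virtual address space (Cayman and
 * newer only). The flags and operation are validated strictly so the
 * interface can grow later without breaking existing userspace.
 */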
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* !! DONT REMOVE !!
         * We don't support vm_id yet. To be sure we don't have broken
         * userspace, reject anyone trying to use a non-zero value; that
         * way we can start using these fields later without breaking
         * existing userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove: we need to force userspace to set the snooped flag,
         * otherwise we will end up with broken userspace and we won't be
         * able to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }
        if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
                dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                args->operation = RADEON_VA_RESULT_ERROR;
                radeon_bo_unreserve(rbo);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
                if (bo_va->soffset) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->soffset;
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
                break;
        default:
                break;
        }
        args->operation = RADEON_VA_RESULT_OK;
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
out:
        radeon_bo_unreserve(rbo);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

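/* Create a dumb scanout buffer in VRAM and return a handle to it. */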
int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM,
                                     false, false,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
                             struct drm_device *dev,
                             uint32_t handle)
{
        return drm_gem_handle_delete(file_priv, handle);
}

#if defined(CONFIG_DEBUG_FS)
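/* Dump every GEM bo with its size, current placement and owner pid. */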
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_bo *rbo;
        unsigned i = 0;

        mutex_lock(&rdev->gem.mutex);
        list_for_each_entry(rbo, &rdev->gem.objects, list) {
                unsigned domain;
                const char *placement;

                domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
                switch (domain) {
                case RADEON_GEM_DOMAIN_VRAM:
                        placement = "VRAM";
                        break;
                case RADEON_GEM_DOMAIN_GTT:
                        placement = " GTT";
                        break;
                case RADEON_GEM_DOMAIN_CPU:
                default:
                        placement = " CPU";
                        break;
                }
                seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
                           i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
                           placement, (unsigned long)rbo->pid);
                i++;
        }
        mutex_unlock(&rdev->gem.mutex);
        return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
        {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

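/* Register the radeon_gem_info debugfs file when debugfs is enabled. */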
int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
        return 0;
}