linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

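/**
 * amdgpu_gem_object_free - free a GEM object
 *
 * @gobj: GEM object to free
 *
 * Tears down the prime import (if any), unregisters the MMU notifier and
 * drops the reference on the underlying amdgpu buffer object.
 */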
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}

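/**
 * amdgpu_gem_object_create - create an amdgpu BO wrapped in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least one page
 * @initial_domain: initial placement domain(s)
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @kernel: true for kernel internal allocations
 * @obj: returned GEM object
 *
 * Allocations larger than the unpinned GTT size are rejected (except for
 * GDS/GWS/OA), and a failed VRAM-only allocation is retried with GTT
 * added to the domain.
 */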
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                                int alignment, u32 initial_domain,
                                u64 flags, bool kernel,
                                struct drm_gem_object **obj)
{
        struct amdgpu_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
                /* Maximum bo size is the unpinned gtt size since we use the gtt to
                 * handle vram to system pool migrations.
                 */
                max_size = adev->mc.gtt_size - adev->gart_pin_size;
                if (size > max_size) {
                        DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
                                  size >> 20, max_size >> 20);
                        return -ENOMEM;
                }
        }
retry:
        r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;

        return 0;
}

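/**
 * amdgpu_gem_force_release - release all GEM objects still held by clients
 *
 * @adev: amdgpu device
 *
 * Walks every open file and drops any remaining GEM handles, warning
 * because active clients are not expected at this point.
 */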
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev->ddev;
        struct drm_file *file;

        mutex_lock(&ddev->filelist_mutex);

        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
                int handle;

                WARN_ONCE(1, "Still active user space clients!\n");
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
                        drm_gem_object_unreference_unlocked(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the new
 * and the open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(rbo);
        return 0;
}

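/*
 * Called when a GEM handle is closed: drop the bo_va reference held for
 * this file's VM and remove the mapping once the last reference is gone.
 */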
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = bo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;

        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because we fail to reserve bo (%d)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        amdgpu_vm_bo_rmv(adev, bo_va);
                }
        }
        ttm_eu_backoff_reservation(&ticket, &list);
}

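/*
 * Translate a -EDEADLK from a GEM ioctl into a GPU reset attempt and ask
 * user space to retry by returning -EAGAIN.
 */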
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
        if (r == -EDEADLK) {
                r = amdgpu_gpu_reset(adev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */
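/*
 * Allocate a new buffer object in the requested domains and return a GEM
 * handle for it; GDS, GWS and OA requests are converted to byte sizes via
 * their respective shifts.
 */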
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_create *args = data;
        uint64_t size = args->in.bo_size;
        struct drm_gem_object *gobj;
        uint32_t handle;
        bool kernel = false;
        int r;

        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                kernel = true;
                if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
                        size = size << AMDGPU_GDS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
                        size = size << AMDGPU_GWS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
                        size = size << AMDGPU_OA_SHIFT;
                else {
                        r = -EINVAL;
                        goto error_unlock;
                }
        }
        size = roundup(size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
                                     args->in.domain_flags,
                                     kernel, &gobj);
        if (r)
                goto error_unlock;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto error_unlock;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;

error_unlock:
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}

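/*
 * Wrap a range of user memory in a GTT buffer object. Writable mappings
 * require MMU notifier registration, and with AMDGPU_GEM_USERPTR_VALIDATE
 * the user pages are fetched and the BO is validated into GTT before the
 * handle is returned.
 */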
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
             !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

                /* if we want to write to it we must install an MMU notifier */
                return -EACCES;
        }

        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_CPU, 0,
                                     0, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_amdgpu_bo(gobj);
        bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);

                r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
                                                 bo->tbo.ttm->pages);
                if (r)
                        goto unlock_mmap_sem;

                r = amdgpu_bo_reserve(bo, true);
                if (r)
                        goto free_pages;

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                amdgpu_bo_unreserve(bo);
                if (r)
                        goto free_pages;

                up_read(&current->mm->mmap_sem);
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        return 0;

free_pages:
        release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
        up_read(&current->mm->mmap_sem);

release_object:
        drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
        r = amdgpu_gem_handle_lockup(adev, r);

        return r;
}

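/*
 * Look up the fake mmap offset of a BO so user space can map it; userptr
 * BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS are refused.
 */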
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}

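/*
 * Wait for the fences on a buffer object to signal, up to the absolute
 * timeout supplied by user space, and report whether the BO is still busy.
 */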
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (timeout == 0)
                ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
        else
                ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_unreference_unlocked(gobj);
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}

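/*
 * Get or set the tiling flags and opaque metadata attached to a buffer
 * object.
 */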
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: map or unmap operation that was requested
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
        struct ttm_validate_buffer tv, *entry;
        struct amdgpu_bo_list_entry vm_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo_va->bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

        /* Provide duplicates to avoid -EALREADY */
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_print;

        amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
        list_for_each_entry(entry, &list, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                   just abort and wait for the next CS */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }
        list_for_each_entry(entry, &duplicates, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                   just abort and wait for the next CS */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        r = amdgpu_vm_clear_freed(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        if (operation == AMDGPU_VA_OP_MAP)
                r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_print:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

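/*
 * Map or unmap a buffer object in the GPU virtual address space of the
 * calling client, then update the page tables right away unless the
 * update is delayed or VM debugging is enabled.
 */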
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *rbo;
        struct amdgpu_bo_va *bo_va;
        struct ttm_validate_buffer tv, tv_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint32_t invalid_flags, va_flags = 0;
        int r = 0;

        if (!adev->vm_manager.enabled)
                return -ENOTTY;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "va_address 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->va_address,
                        AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
                        AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_amdgpu_bo(gobj);
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
        tv.bo = &rbo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        tv_pd.bo = &fpriv->vm.page_directory->tbo;
        tv_pd.shared = true;
        list_add(&tv_pd.head, &list);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }

        bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                if (args->flags & AMDGPU_VM_PAGE_READABLE)
                        va_flags |= AMDGPU_PTE_READABLE;
                if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
                        va_flags |= AMDGPU_PTE_WRITEABLE;
                if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
                        va_flags |= AMDGPU_PTE_EXECUTABLE;
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;
        default:
                break;
        }
        ttm_eu_backoff_reservation(&ticket, &list);
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
            !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

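/*
 * Query the creation parameters of a buffer object or change its
 * preferred placement domains after creation.
 */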
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = (void __user *)(long)args->value;

                info.bo_size = robj->gem_base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->prefered_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                        AMDGPU_GEM_DOMAIN_GTT |
                                                        AMDGPU_GEM_DOMAIN_CPU);
                robj->allowed_domains = robj->prefered_domains;
                if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

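/*
 * Create a CPU accessible VRAM buffer for the KMS dumb-buffer interface
 * and return a handle to it.
 */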
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                     ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
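/* Print placement and pin count for a single GEM BO in debugfs. */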
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
        struct drm_gem_object *gobj = ptr;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct seq_file *m = data;
        unsigned domain;
        const char *placement;
        unsigned pin_count;

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                placement = "VRAM";
                break;
        case AMDGPU_GEM_DOMAIN_GTT:
                placement = " GTT";
                break;
        case AMDGPU_GEM_DOMAIN_CPU:
        default:
                placement = " CPU";
                break;
        }
        seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
                   id, amdgpu_bo_size(bo), placement,
                   amdgpu_bo_gpu_offset(bo));

        pin_count = ACCESS_ONCE(bo->pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);
        seq_printf(m, "\n");

        return 0;
}

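/*
 * Dump every GEM object of every open file, grouped by the owning
 * process, for the amdgpu_gem_info debugfs file.
 */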
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_file *file;
        int r;

        r = mutex_lock_interruptible(&dev->filelist_mutex);
        if (r)
                return r;

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;

                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();

                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&dev->filelist_mutex);
        return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

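/* Register the amdgpu_gem_info debugfs file when debugfs is enabled. */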
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
        return 0;
}