linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

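/**
 * amdgpu_gem_object_free - free a GEM object
 *
 * @gobj: GEM object to free
 *
 * Tears down any PRIME import attachment, unregisters the MMU
 * notifier and drops the GEM reference on the backing BO.
 */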
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}

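/**
 * amdgpu_gem_object_create - allocate a BO and wrap it in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment, clamped to at least PAGE_SIZE
 * @initial_domain: initial placement (VRAM, GTT, CPU, GDS, GWS or OA)
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: true for kernel internal allocations
 * @obj: resulting GEM object
 *
 * If a pure VRAM allocation fails, the placement is retried once with
 * GTT added as a fallback.
 */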
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                                int alignment, u32 initial_domain,
                                u64 flags, bool kernel,
                                struct drm_gem_object **obj)
{
        struct amdgpu_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
                /* Maximum bo size is the unpinned gtt size since we use the gtt to
                 * handle vram to system pool migrations.
                 */
                max_size = adev->mc.gtt_size - adev->gart_pin_size;
                if (size > max_size) {
                        DRM_DEBUG("Allocation size %luMB bigger than %luMB limit\n",
                                  size >> 20, max_size >> 20);
                        return -ENOMEM;
                }
        }
retry:
        r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%lu, %u, %d, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;
        robj->pid = task_pid_nr(current);

        mutex_lock(&adev->gem.mutex);
        list_add_tail(&robj->list, &adev->gem.objects);
        mutex_unlock(&adev->gem.mutex);

        return 0;
}

int amdgpu_gem_init(struct amdgpu_device *adev)
{
        INIT_LIST_HEAD(&adev->gem.objects);
        return 0;
}

void amdgpu_gem_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_force_delete(adev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(rbo);
        return 0;
}

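/**
 * amdgpu_gem_object_close - drop the per-file VM mapping of a GEM object
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file the handle belonged to
 *
 * Drops the bo_va reference taken in amdgpu_gem_object_open() and
 * removes the mapping once the last reference is gone.
 */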
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, true);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we failed to reserve the bo (%d)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        amdgpu_vm_bo_rmv(adev, bo_va);
                }
        }
        amdgpu_bo_unreserve(rbo);
}

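/**
 * amdgpu_gem_handle_lockup - map a GPU lockup to the right ioctl result
 *
 * @adev: amdgpu device
 * @r: error code returned by the operation
 *
 * On -EDEADLK the GPU is reset and -EAGAIN is returned so userspace
 * retries the ioctl; all other codes pass through unchanged.
 */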
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
        if (r == -EDEADLK) {
                r = amdgpu_gpu_reset(adev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */
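/**
 * amdgpu_gem_create_ioctl - DRM_IOCTL_AMDGPU_GEM_CREATE handler
 *
 * Validates the requested domains, scales GDS/GWS/OA sizes by their
 * respective block-size shifts, rounds the size up to a page multiple,
 * creates the BO and returns a handle for it in args->out.handle.
 */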
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_create *args = data;
        uint64_t size = args->in.bo_size;
        struct drm_gem_object *gobj;
        uint32_t handle;
        bool kernel = false;
        int r;

        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                kernel = true;
                if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
                        size = size << AMDGPU_GDS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
                        size = size << AMDGPU_GWS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
                        size = size << AMDGPU_OA_SHIFT;
                else {
                        r = -EINVAL;
                        goto error_unlock;
                }
        }
        size = roundup(size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
                                     args->in.domain_flags,
                                     kernel, &gobj);
        if (r)
                goto error_unlock;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto error_unlock;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;

error_unlock:
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}

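/**
 * amdgpu_gem_userptr_ioctl - DRM_IOCTL_AMDGPU_GEM_USERPTR handler
 *
 * Wraps a page-aligned range of user memory in a GEM object. Writable
 * mappings must be anonymous memory with an MMU notifier registered;
 * with AMDGPU_GEM_USERPTR_VALIDATE the pages are bound to GTT up front.
 */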
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
             !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
             !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {

                /* if we want to write to it we must require anonymous
                 * memory and install an MMU notifier
                 */
                return -EACCES;
        }

        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_CPU, 0,
                                     0, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_amdgpu_bo(gobj);
        r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);
                r = amdgpu_bo_reserve(bo, true);
                if (r) {
                        up_read(&current->mm->mmap_sem);
                        goto release_object;
                }

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                amdgpu_bo_unreserve(bo);
                up_read(&current->mm->mmap_sem);
                if (r)
                        goto release_object;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        return 0;

release_object:
        drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
        r = amdgpu_gem_handle_lockup(adev, r);

        return r;
}

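/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a BO
 *
 * @filp: DRM file the handle belongs to
 * @dev: DRM device
 * @handle: GEM handle
 * @offset_p: resulting mmap offset
 *
 * Userptr BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS
 * cannot be mapped and return -EPERM.
 */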
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

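/**
 * amdgpu_gem_mmap_ioctl - DRM_IOCTL_AMDGPU_GEM_MMAP handler
 *
 * Thin wrapper around amdgpu_mode_dumb_mmap() that returns the mmap
 * offset for a handle in args->out.addr_ptr.
 */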
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}

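/**
 * amdgpu_gem_wait_idle_ioctl - DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE handler
 *
 * Waits on the BO's reservation object for all fences, up to the
 * absolute timeout supplied by userspace. args->out.status is 0 when
 * the BO is idle and 1 when the wait timed out.
 */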
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (timeout == 0)
                ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
        else
                ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_unreference_unlocked(gobj);
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}

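/**
 * amdgpu_gem_metadata_ioctl - DRM_IOCTL_AMDGPU_GEM_METADATA handler
 *
 * Gets or sets the tiling flags and the opaque metadata blob attached
 * to a BO, depending on args->op.
 */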
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: the VA operation that was just performed
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
        struct ttm_validate_buffer tv, *entry;
        struct amdgpu_bo_list_entry vm_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo_va->bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

        /* Provide duplicates to avoid -EALREADY */
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_print;

        amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
        list_for_each_entry(entry, &list, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                 * just abort and wait for the next CS
                 */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }
        list_for_each_entry(entry, &duplicates, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                 * just abort and wait for the next CS
                 */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        r = amdgpu_vm_clear_freed(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        if (operation == AMDGPU_VA_OP_MAP)
                r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_print:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

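/**
 * amdgpu_gem_va_ioctl - DRM_IOCTL_AMDGPU_GEM_VA handler
 *
 * Maps or unmaps a BO in the process' GPU virtual address space and,
 * unless AMDGPU_VM_DELAY_UPDATE is set or amdgpu_vm_debug is enabled,
 * commits the page table update right away via amdgpu_gem_va_update_vm().
 */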
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *rbo;
        struct amdgpu_bo_va *bo_va;
        struct ttm_validate_buffer tv, tv_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint32_t invalid_flags, va_flags = 0;
        int r = 0;

        if (!adev->vm_manager.enabled)
                return -ENOTTY;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "va_address 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->va_address,
                        AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
                        AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_amdgpu_bo(gobj);
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
        tv.bo = &rbo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        if (args->operation == AMDGPU_VA_OP_MAP) {
                tv_pd.bo = &fpriv->vm.page_directory->tbo;
                tv_pd.shared = true;
                list_add(&tv_pd.head, &list);
        }
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }

        bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                if (args->flags & AMDGPU_VM_PAGE_READABLE)
                        va_flags |= AMDGPU_PTE_READABLE;
                if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
                        va_flags |= AMDGPU_PTE_WRITEABLE;
                if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
                        va_flags |= AMDGPU_PTE_EXECUTABLE;
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;
        default:
                break;
        }
        ttm_eu_backoff_reservation(&ticket, &list);
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
            !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

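/**
 * amdgpu_gem_op_ioctl - DRM_IOCTL_AMDGPU_GEM_OP handler
 *
 * Small grab bag of BO operations: query the creation parameters of a
 * BO or restrict the domains it may be placed in.
 */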
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = (void __user *)(long)args->value;

                info.bo_size = robj->gem_base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->initial_domain;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                      AMDGPU_GEM_DOMAIN_GTT |
                                                      AMDGPU_GEM_DOMAIN_CPU);
                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

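/**
 * amdgpu_mode_dumb_create - DRM dumb-buffer allocation hook
 *
 * Computes the aligned pitch and page-aligned size for the requested
 * width/height/bpp, then allocates a CPU-accessible VRAM BO and
 * returns a handle for it.
 */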
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                     ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                return r;

        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
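/*
 * Dump every GEM BO tracked by the device, with its size, current
 * placement and the pid of the task that created it.
 */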
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_bo *rbo;
        unsigned i = 0;

        mutex_lock(&adev->gem.mutex);
        list_for_each_entry(rbo, &adev->gem.objects, list) {
                unsigned domain;
                const char *placement;

                domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
                switch (domain) {
                case AMDGPU_GEM_DOMAIN_VRAM:
                        placement = "VRAM";
                        break;
                case AMDGPU_GEM_DOMAIN_GTT:
                        placement = " GTT";
                        break;
                case AMDGPU_GEM_DOMAIN_CPU:
                default:
                        placement = " CPU";
                        break;
                }
                seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
                           i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
                           placement, (unsigned long)rbo->pid);
                i++;
        }
        mutex_unlock(&adev->gem.mutex);
        return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
        return 0;
}