linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"

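/**
 * amdgpu_gem_object_free - free the GEM backing of a BO
 * @gobj: GEM object to free
 *
 * Unregisters any MMU notifier attached to the buffer and drops the
 * amdgpu_bo reference that backs the GEM object.
 */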
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

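/**
 * amdgpu_gem_object_create - allocate a BO and wrap it in a GEM object
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment in bytes
 * @initial_domain: preferred placement (VRAM, GTT, CPU, ...)
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @type: TTM buffer object type
 * @resv: optional reservation object to share with the new BO
 * @obj: resulting GEM object, NULL on failure
 *
 * On allocation failure the CPU_ACCESS_REQUIRED flag is dropped and a
 * VRAM-only placement is widened to VRAM|GTT before giving up.
 *
 * Returns 0 on success or a negative error code.
 */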
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
retry:
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%lu, %u, %d, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->gem_base;

	return 0;
}

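/**
 * amdgpu_gem_force_release - drop all GEM handles of all clients
 * @adev: amdgpu device
 *
 * Called on teardown to clean up after user space clients that still
 * hold GEM objects; warns when such clients and allocations are found.
 */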
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called via drm_gem_handle_create(), which is reached from both the
 * GEM create and the GEM open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

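/**
 * amdgpu_gem_object_close - clean up the per-VM state when a handle is closed
 * @obj: GEM object being closed
 * @file_priv: DRM file of the client closing the handle
 *
 * Drops the bo_va reference the client held on this BO and, once the
 * last reference is gone, removes the mapping and clears the freed page
 * table entries of the client's VM.
 */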
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
	if (r) {
		dev_err(adev->dev, "leaking bo va because we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r))
				dev_err(adev->dev, "failed to clear page tables on GEM object close (%d)\n", r);

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
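/**
 * amdgpu_gem_create_ioctl - create a GEM object from user space
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_create)
 * @filp: DRM file of the caller
 *
 * Validates the requested flags and domains, allocates the buffer and
 * returns a GEM handle for it in args->out.handle.
 */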
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct reservation_object *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, ttm_bo_type_device, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

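/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_userptr)
 * @filp: DRM file of the caller
 *
 * Wraps a page-aligned user address range in a GTT-placed BO, optionally
 * registering an MMU notifier and pre-populating the pages when the
 * corresponding AMDGPU_GEM_USERPTR_* flags are set.
 */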
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

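/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a GEM handle
 * @filp: DRM file of the caller
 * @dev: DRM device
 * @handle: GEM handle
 * @offset_p: returned mmap offset
 *
 * Rejects userptr BOs and BOs created with NO_CPU_ACCESS, since they
 * must not be mapped into user space.
 */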
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

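/**
 * amdgpu_gem_mmap_ioctl - return the mmap offset for a GEM handle
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_mmap)
 * @filp: DRM file of the caller
 */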
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

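/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_wait_idle)
 * @filp: DRM file of the caller
 *
 * Waits on all fences attached to the BO's reservation object, up to the
 * absolute timeout given in the ioctl, and reports in args->out.status
 * whether the timeout expired before the BO went idle.
 */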
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

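/**
 * amdgpu_gem_metadata_ioctl - get or set buffer metadata and tiling flags
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_metadata)
 * @filp: DRM file of the caller
 */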
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_directories(adev, vm);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

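/**
 * amdgpu_gem_va_ioctl - map, unmap, clear or replace a GPU VA mapping
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_va)
 * @filp: DRM file of the caller
 *
 * Validates the requested virtual address range and flags, reserves the
 * BO and the page directory, performs the requested VA operation and,
 * unless a delayed update was requested, flushes it to the page tables.
 */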
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

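/**
 * amdgpu_gem_op_ioctl - query creation info or change the placement of a BO
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_op)
 * @filp: DRM file of the caller
 */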
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT |
							 AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

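/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 * @file_priv: DRM file of the caller
 * @dev: DRM device
 * @args: dumb buffer parameters (width, height, bpp) and results
 *
 * Computes an aligned pitch and size, allocates a CPU-accessible BO in
 * a display-capable domain and returns a GEM handle for it.
 */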
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)			\
	do {								\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag))		\
			seq_printf((m), " " #flag);			\
	} while (0)

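/*
 * Print one line of information about a GEM BO to the debugfs file:
 * handle, size, current placement, pin count, dma-buf import/export
 * state and the AMDGPU_GEM_CREATE_* flags it was allocated with.
 */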
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->gem_base.dma_buf);
	attachment = READ_ONCE(bo->gem_base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_printf(m, "\n");

	return 0;
}

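/*
 * Walk every open DRM file and dump all of its GEM BOs, one client per
 * block, prefixed with the owning process's pid and command name.
 */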
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

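/*
 * Register the amdgpu_gem_info debugfs file; a no-op when debugfs is
 * not built in.
 */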
int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}