linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"

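/**
 * amdgpu_gem_object_free - free the amdgpu BO backing a GEM object
 * @gobj: GEM object to free
 *
 * Unregisters the MMU notifier, if any, and drops the buffer object
 * reference held by the GEM object.
 */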
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}

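/**
 * amdgpu_gem_object_create - allocate a BO and wrap it in a GEM object
 * @adev: amdgpu device
 * @size: size of the buffer in bytes
 * @alignment: requested byte alignment
 * @initial_domain: initial placement domain (AMDGPU_GEM_DOMAIN_*)
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @type: TTM buffer object type
 * @resv: reservation object to share, or NULL to allocate a private one
 * @obj: resulting GEM object, NULL on failure
 *
 * Returns 0 on success, negative error code on failure.
 */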
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, enum ttm_bo_type type,
                             struct dma_resv *resv,
                             struct drm_gem_object **obj)
{
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
        int r;

        memset(&bp, 0, sizeof(bp));
        *obj = NULL;

        bp.size = size;
        bp.byte_align = alignment;
        bp.type = type;
        bp.resv = resv;
        bp.preferred_domain = initial_domain;
        bp.flags = flags;
        bp.domain = initial_domain;
        r = amdgpu_bo_create(adev, &bp, &bo);
        if (r)
                return r;

        *obj = &bo->tbo.base;

        return 0;
}

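/**
 * amdgpu_gem_force_release - release all GEM objects still held by clients
 * @adev: amdgpu device
 *
 * Walks every open DRM file and drops all GEM handles that user space
 * still holds, warning about the leftover clients and allocations.
 * Intended for device teardown when user space has not cleaned up on
 * its own.
 */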
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev_to_drm(adev);
        struct drm_file *file;

        mutex_lock(&ddev->filelist_mutex);

        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
                int handle;

                WARN_ONCE(1, "Still active user space clients!\n");
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
                        drm_gem_object_put(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is used in both the GEM
 * create and open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
                           struct drm_file *file_priv)
{
        struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        struct mm_struct *mm;
        int r;

        mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
        if (mm && mm != current->mm)
                return -EPERM;

        if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
            abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
                return -EPERM;

        r = amdgpu_bo_reserve(abo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_find(vm, abo);
        if (!bo_va) {
                bo_va = amdgpu_vm_bo_add(adev, vm, abo);
        } else {
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(abo);
        return 0;
}

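/**
 * amdgpu_gem_object_close - clean up the per-VM state when a handle is closed
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the client closing the handle
 *
 * Counterpart to amdgpu_gem_object_open(): drops the bo_va reference for
 * this client's VM and, when the last reference is gone, removes the
 * mapping and fences the resulting page table updates on the BO.
 */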
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;

        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
        struct dma_fence *fence = NULL;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
        long r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo->tbo;
        tv.num_shared = 2;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%ld)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
        if (!bo_va || --bo_va->ref_count)
                goto out_unlock;

        amdgpu_vm_bo_rmv(adev, bo_va);
        if (!amdgpu_vm_ready(vm))
                goto out_unlock;

        fence = dma_resv_get_excl(bo->tbo.base.resv);
        if (fence) {
                amdgpu_bo_fence(bo, fence, true);
                fence = NULL;
        }

        r = amdgpu_vm_clear_freed(adev, vm, &fence);
        if (r || !fence)
                goto out_unlock;

        amdgpu_bo_fence(bo, fence, true);
        dma_fence_put(fence);

out_unlock:
        if (unlikely(r < 0))
                dev_err(adev->dev, "failed to clear page "
                        "tables on GEM object close (%ld)\n", r);
        ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
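/**
 * amdgpu_gem_create_ioctl - create a GEM object (AMDGPU_GEM_CREATE ioctl)
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_create in/out parameters
 * @filp: DRM file private
 *
 * Validates the requested flags and domains, allocates the buffer object
 * (retrying without forced CPU access and falling back from VRAM to GTT
 * when the allocation fails) and returns a GEM handle for it in @data.
 */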
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        union drm_amdgpu_gem_create *args = data;
        uint64_t flags = args->in.domain_flags;
        uint64_t size = args->in.bo_size;
        struct dma_resv *resv = NULL;
        struct drm_gem_object *gobj;
        uint32_t handle, initial_domain;
        int r;

        /* reject invalid gem flags */
        if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
                      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
                      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
                      AMDGPU_GEM_CREATE_ENCRYPTED))

                return -EINVAL;

        /* reject invalid gem domains */
        if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
                return -EINVAL;

        if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
                DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
                return -EINVAL;
        }

        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                        /* if gds bo is created from user space, it must be
                         * passed to bo list
                         */
                        DRM_ERROR("GDS bo cannot be per-vm-bo\n");
                        return -EINVAL;
                }
                flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        }

        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                r = amdgpu_bo_reserve(vm->root.base.bo, false);
                if (r)
                        return r;

                resv = vm->root.base.bo->tbo.base.resv;
        }

retry:
        initial_domain = (u32)(0xffffffff & args->in.domains);
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     initial_domain,
                                     flags, ttm_bo_type_device, resv, &gobj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
                                flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
                                goto retry;
                        }

                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
                                  size, initial_domain, args->in.alignment, r);
                }
                return r;
        }

        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                if (!r) {
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

                        abo->parent = amdgpu_bo_ref(vm->root.base.bo);
                }
                amdgpu_bo_unreserve(vm->root.base.bo);
        }
        if (r)
                return r;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r)
                return r;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;
}

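/**
 * amdgpu_gem_userptr_ioctl - create a GEM object from user memory
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_userptr in/out parameters
 * @filp: DRM file private
 *
 * Wraps a page-aligned range of user memory in a GTT buffer object,
 * optionally registering an MMU notifier and pre-validating the pages,
 * and returns a GEM handle for it in @data.
 */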
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        args->addr = untagged_addr(args->addr);

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
             !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

                /* if we want to write to it we must install an MMU notifier */
                return -EACCES;
        }

        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
                                     0, ttm_bo_type_device, NULL, &gobj);
        if (r)
                return r;

        bo = gem_to_amdgpu_bo(gobj);
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
                if (r)
                        goto release_object;

                r = amdgpu_bo_reserve(bo, true);
                if (r)
                        goto user_pages_done;

                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);
                if (r)
                        goto user_pages_done;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        if (r)
                goto user_pages_done;

        args->handle = handle;

user_pages_done:
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
                amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
        drm_gem_object_put(gobj);

        return r;
}

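/**
 * amdgpu_mode_dumb_mmap - look up the mmap offset for a GEM handle
 * @filp: DRM file private
 * @dev: DRM device
 * @handle: GEM handle
 * @offset_p: returned fake mmap offset
 *
 * Userptr BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS are
 * rejected with -EPERM.
 */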
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_put(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_put(gobj);
        return 0;
}

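/**
 * amdgpu_gem_mmap_ioctl - AMDGPU_GEM_MMAP ioctl
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_mmap in/out parameters
 * @filp: DRM file private
 *
 * Returns the mmap offset for the BO referenced by the given handle in
 * args->out.addr_ptr.
 */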
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}

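/**
 * amdgpu_gem_wait_idle_ioctl - AMDGPU_GEM_WAIT_IDLE ioctl
 * @dev: DRM device
 * @data: union drm_amdgpu_gem_wait_idle in/out parameters
 * @filp: DRM file private
 *
 * Waits for all fences on the reservation object of the BO, up to the
 * requested timeout, and reports whether the BO is still busy.
 */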
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
                                        timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_put(gobj);
        return r;
}

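/**
 * amdgpu_gem_metadata_ioctl - AMDGPU_GEM_METADATA ioctl
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_metadata in/out parameters
 * @filp: DRM file private
 *
 * Gets or sets the tiling flags and the opaque metadata blob attached
 * to a buffer object.
 */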
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d \n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_put(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va *bo_va,
                                    uint32_t operation)
{
        int r;

        if (!amdgpu_vm_ready(vm))
                return;

        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                goto error;

        if (operation == AMDGPU_VA_OP_MAP ||
            operation == AMDGPU_VA_OP_REPLACE) {
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        goto error;
        }

        r = amdgpu_vm_update_pdes(adev, vm, false);

error:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
        uint64_t pte_flag = 0;

        if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
                pte_flag |= AMDGPU_PTE_EXECUTABLE;
        if (flags & AMDGPU_VM_PAGE_READABLE)
                pte_flag |= AMDGPU_PTE_READABLE;
        if (flags & AMDGPU_VM_PAGE_WRITEABLE)
                pte_flag |= AMDGPU_PTE_WRITEABLE;
        if (flags & AMDGPU_VM_PAGE_PRT)
                pte_flag |= AMDGPU_PTE_PRT;

        if (adev->gmc.gmc_funcs->map_mtype)
                pte_flag |= amdgpu_gmc_map_mtype(adev,
                                                 flags & AMDGPU_VM_MTYPE_MASK);

        return pte_flag;
}

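/**
 * amdgpu_gem_va_ioctl - AMDGPU_GEM_VA ioctl
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_va in/out parameters
 * @filp: DRM file private
 *
 * Maps, unmaps, replaces or clears GPU virtual address ranges for a BO
 * in the client's VM after validating the address range and flags.
 */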
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
        const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_PRT;

        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *abo;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo_list_entry vm_pd;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint64_t va_flags;
        uint64_t vm_size;
        int r = 0;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%LX is in reserved area 0x%LX\n",
                        args->va_address, AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        if (args->va_address >= AMDGPU_GMC_HOLE_START &&
            args->va_address < AMDGPU_GMC_HOLE_END) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
                        args->va_address, AMDGPU_GMC_HOLE_START,
                        AMDGPU_GMC_HOLE_END);
                return -EINVAL;
        }

        args->va_address &= AMDGPU_GMC_HOLE_MASK;

        vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
        vm_size -= AMDGPU_VA_RESERVED_SIZE;
        if (args->va_address + args->map_size > vm_size) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%llx is in top reserved area 0x%llx\n",
                        args->va_address + args->map_size, vm_size);
                return -EINVAL;
        }

        if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
                dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
                        args->flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
        case AMDGPU_VA_OP_CLEAR:
        case AMDGPU_VA_OP_REPLACE:
                break;
        default:
                dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
        if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
            !(args->flags & AMDGPU_VM_PAGE_PRT)) {
                gobj = drm_gem_object_lookup(filp, args->handle);
                if (gobj == NULL)
                        return -ENOENT;
                abo = gem_to_amdgpu_bo(gobj);
                tv.bo = &abo->tbo;
                if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                        tv.num_shared = 1;
                else
                        tv.num_shared = 0;
                list_add(&tv.head, &list);
        } else {
                gobj = NULL;
                abo = NULL;
        }

        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_unref;

        if (abo) {
                bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
                if (!bo_va) {
                        r = -ENOENT;
                        goto error_backoff;
                }
        } else if (args->operation != AMDGPU_VA_OP_CLEAR) {
                bo_va = fpriv->prt_va;
        } else {
                bo_va = NULL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;

        case AMDGPU_VA_OP_CLEAR:
                r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
                                                args->va_address,
                                                args->map_size);
                break;
        case AMDGPU_VA_OP_REPLACE:
                va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
                r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
                                             args->offset_in_bo, args->map_size,
                                             va_flags);
                break;
        default:
                break;
        }
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
                                        args->operation);

error_backoff:
        ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
        drm_gem_object_put(gobj);
        return r;
}

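/**
 * amdgpu_gem_op_ioctl - AMDGPU_GEM_OP ioctl
 * @dev: DRM device
 * @data: struct drm_amdgpu_gem_op in/out parameters
 * @filp: DRM file private
 *
 * Either queries the creation parameters of a BO or updates its
 * preferred and allowed placement domains.
 */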
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_vm_bo_base *base;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = u64_to_user_ptr(args->value);

                info.bo_size = robj->tbo.base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->preferred_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
                        r = -EINVAL;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                for (base = robj->vm_bo; base; base = base->next)
                        if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
                                amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
                                r = -EINVAL;
                                amdgpu_bo_unreserve(robj);
                                goto out;
                        }

                robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                        AMDGPU_GEM_DOMAIN_GTT |
                                                        AMDGPU_GEM_DOMAIN_CPU);
                robj->allowed_domains = robj->preferred_domains;
                if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

                if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                        amdgpu_vm_bo_invalidate(adev, robj, true);

                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_put(gobj);
        return r;
}

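/**
 * amdgpu_mode_dumb_create - create a dumb buffer for KMS
 * @file_priv: DRM file private
 * @dev: DRM device
 * @args: dumb buffer parameters; the handle is returned in @args->handle
 *
 * Allocates a CPU-accessible, display-capable buffer for the dumb
 * buffer API and creates a GEM handle for it.
 */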
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_gem_object *gobj;
        uint32_t handle;
        u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        u32 domain;
        int r;

        /*
         * The buffer returned from this function should be cleared, but
         * it can only be done if the ring is enabled or we'll fail to
         * create the buffer.
         */
        if (adev->mman.buffer_funcs_enabled)
                flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

        args->pitch = amdgpu_align_pitch(adev, args->width,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);
        domain = amdgpu_bo_get_preferred_pin_domain(adev,
                                amdgpu_display_supported_domains(adev, flags));
        r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
                                     ttm_bo_type_device, NULL, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)   \
        if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \
                seq_printf((m), " " #flag);             \
        }

static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
        struct drm_gem_object *gobj = ptr;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct seq_file *m = data;

        struct dma_buf_attachment *attachment;
        struct dma_buf *dma_buf;
        unsigned domain;
        const char *placement;
        unsigned pin_count;

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                placement = "VRAM";
                break;
        case AMDGPU_GEM_DOMAIN_GTT:
                placement = " GTT";
                break;
        case AMDGPU_GEM_DOMAIN_CPU:
        default:
                placement = " CPU";
                break;
        }
        seq_printf(m, "\t0x%08x: %12ld byte %s",
                   id, amdgpu_bo_size(bo), placement);

        pin_count = READ_ONCE(bo->pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);

        dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
        attachment = READ_ONCE(bo->tbo.base.import_attach);

        if (attachment)
                seq_printf(m, " imported from %p%s", dma_buf,
                           attachment->peer2peer ? " P2P" : "");
        else if (dma_buf)
                seq_printf(m, " exported as %p", dma_buf);

        amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
        amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

        seq_printf(m, "\n");

        return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_file *file;
        int r;

        r = mutex_lock_interruptible(&dev->filelist_mutex);
        if (r)
                return r;

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;

                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();

                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&dev->filelist_mutex);
        return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list,
                                        ARRAY_SIZE(amdgpu_debugfs_gem_list));
#endif
        return 0;
}