linux/drivers/gpu/drm/nouveau/nouveau_gem.c
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
        return 0;
}

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo;

        if (!nvbo)
                return;

        bo = &nvbo->bo;
        nvbo->gem = NULL;

        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);

        ttm_bo_unref(&bo);

        drm_gem_object_release(gem);
        kfree(gem);
}

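/*
 * Called when a client opens a handle to this GEM object.  If the client
 * has its own address space (cli->base.vm is only set on chipsets with
 * per-client VM), look up or create a VMA for the buffer in that VM and
 * take a reference on it.
 */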
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;
        int ret;

        if (!cli->base.vm)
                return 0;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
        if (!vma) {
                vma = kzalloc(sizeof(*vma), GFP_KERNEL);
                if (!vma) {
                        ret = -ENOMEM;
                        goto out;
                }

                ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
                if (ret) {
                        kfree(vma);
                        goto out;
                }
        } else {
                vma->refcount++;
        }

out:
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

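/*
 * Deferred VMA teardown, scheduled via nouveau_fence_work() so the mapping
 * is only torn down once the GPU has finished with the buffer.
 */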
static void
nouveau_gem_object_delete(void *data)
{
        struct nouveau_vma *vma = data;
        nouveau_vm_unmap(vma);
        nouveau_vm_put(vma);
        kfree(vma);
}

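/*
 * Drop a client's VMA for this buffer.  If the buffer is currently backed
 * by VRAM/GART and still has an outstanding fence, the unmap is deferred
 * to nouveau_gem_object_delete() once that fence signals; otherwise the
 * VMA is unmapped and freed immediately.
 */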
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
        const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
        struct nouveau_fence *fence = NULL;

        list_del(&vma->head);

        if (mapped) {
                spin_lock(&nvbo->bo.bdev->fence_lock);
                if (nvbo->bo.sync_obj)
                        fence = nouveau_fence_ref(nvbo->bo.sync_obj);
                spin_unlock(&nvbo->bo.bdev->fence_lock);
        }

        if (fence) {
                nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
        } else {
                if (mapped)
                        nouveau_vm_unmap(vma);
                nouveau_vm_put(vma);
                kfree(vma);
        }
        nouveau_fence_unref(&fence);
}

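/*
 * Called when a client drops its handle to this GEM object.  Releases the
 * client's reference on the VMA; the final reference triggers the unmap.
 */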
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;
        int ret;

        if (!cli->base.vm)
                return;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return;

        vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
        if (vma) {
                if (--vma->refcount == 0)
                        nouveau_gem_object_unmap(nvbo, vma);
        }
        ttm_bo_unreserve(&nvbo->bo);
}

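/*
 * Allocate a new buffer object in the requested memory domain(s) and wrap
 * it in a GEM object so it can be handed out to userspace as a handle.
 */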
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;
        if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
                flags |= TTM_PL_FLAG_SYSTEM;

        ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
                             tile_flags, NULL, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (nv_device(drm->device)->card_type >= NV_50)
                nvbo->valid_domains &= domain;

        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
        nvbo->gem->driver_private = nvbo;
        return 0;
}

static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
                 struct drm_nouveau_gem_info *rep)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;

        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

        rep->offset = nvbo->bo.offset;
        if (cli->base.vm) {
                vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
                if (!vma)
                        return -EINVAL;

                rep->offset = vma->offset;
        }

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

        if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
                NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
                return -EINVAL;
        }

        ret = nouveau_gem_new(dev, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
}

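/*
 * Work out the TTM placement for a buffer from the domains userspace says
 * are valid for this submission, preferring to leave the buffer where it
 * already resides to avoid an unnecessary migration.
 */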
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
                return -EINVAL;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                valid_flags |= TTM_PL_FLAG_VRAM;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                valid_flags |= TTM_PL_FLAG_TT;

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
                pref_flags |= TTM_PL_FLAG_TT;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else
                pref_flags |= TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

        return 0;
}

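/*
 * Per-submission bookkeeping for buffer validation: buffers are split into
 * VRAM-only, GART-only and "either" lists, and the ww_acquire_ctx ticket
 * covers all of their reservations.
 */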
struct validate_op {
        struct list_head vram_list;
        struct list_head gart_list;
        struct list_head both_list;
        struct ww_acquire_ctx ticket;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
                   struct ww_acquire_ctx *ticket)
{
        struct list_head *entry, *tmp;
        struct nouveau_bo *nvbo;

        list_for_each_safe(entry, tmp, list) {
                nvbo = list_entry(entry, struct nouveau_bo, entry);

                nouveau_bo_fence(nvbo, fence);

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
                drm_gem_object_unreference_unlocked(nvbo->gem);
        }
}

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
{
        validate_fini_list(&op->vram_list, fence, &op->ticket);
        validate_fini_list(&op->gart_list, fence, &op->ticket);
        validate_fini_list(&op->both_list, fence, &op->ticket);
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
        validate_fini_no_ticket(op, fence);
        ww_acquire_fini(&op->ticket);
}

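/*
 * Look up and reserve every buffer on the pushbuf's validation list.  All
 * reservations share one ww_acquire_ctx ticket; if reserving a buffer
 * returns -EDEADLK, everything reserved so far is dropped, the contended
 * buffer is acquired via the slowpath, and the whole list is retried.
 */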
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_device *dev = chan->drm->dev;
        int trycnt = 0;
        int ret, i;
        struct nouveau_bo *res_bo = NULL;

        ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
        if (++trycnt > 100000) {
                NV_ERROR(cli, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(dev, file_priv, b->handle);
                if (!gem) {
                        NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
                        ww_acquire_done(&op->ticket);
                        validate_fini(op, NULL);
                        return -ENOENT;
                }
                nvbo = gem->driver_private;
                if (nvbo == res_bo) {
                        res_bo = NULL;
                        drm_gem_object_unreference_unlocked(gem);
                        continue;
                }

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_ERROR(cli, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        drm_gem_object_unreference_unlocked(gem);
                        ww_acquire_done(&op->ticket);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
                if (ret) {
                        validate_fini_no_ticket(op, NULL);
                        if (unlikely(ret == -EDEADLK)) {
                                ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                                              &op->ticket);
                                if (!ret)
                                        res_bo = nvbo;
                        }
                        if (unlikely(ret)) {
                                ww_acquire_done(&op->ticket);
                                ww_acquire_fini(&op->ticket);
                                drm_gem_object_unreference_unlocked(gem);
                                if (ret != -ERESTARTSYS)
                                        NV_ERROR(cli, "fail reserve\n");
                                return ret;
                        }
                }

                b->user_priv = (uint64_t)(unsigned long)nvbo;
                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &op->both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &op->vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &op->gart_list);
                else {
                        NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &op->both_list);
                        ww_acquire_done(&op->ticket);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }
                if (nvbo == res_bo)
                        goto retry;
        }

        ww_acquire_done(&op->ticket);
        return 0;
}

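/*
 * Synchronise the channel with any fence still attached to the buffer
 * before it is used by this submission.
 */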
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
        struct nouveau_fence *fence = NULL;
        int ret = 0;

        spin_lock(&nvbo->bo.bdev->fence_lock);
        if (nvbo->bo.sync_obj)
                fence = nouveau_fence_ref(nvbo->bo.sync_obj);
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        if (fence) {
                ret = nouveau_fence_sync(fence, chan);
                nouveau_fence_unref(&fence);
        }

        return ret;
}

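/*
 * Validate each reserved buffer into an acceptable placement and sync the
 * channel against it.  On chipsets prior to NV50, any buffer whose offset
 * or domain no longer matches userspace's presumed values has its presumed
 * data updated and copied back, and is counted so that relocations get
 * re-applied.
 */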
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
              struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
              uint64_t user_pbbo_ptr)
{
        struct nouveau_drm *drm = chan->drm;
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                                (void __force __user *)(uintptr_t)user_pbbo_ptr;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = validate_sync(chan, nvbo);
                if (unlikely(ret)) {
                        NV_ERROR(cli, "fail pre-validate sync\n");
                        return ret;
                }

                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_ERROR(cli, "fail set_domain\n");
                        return ret;
                }

                ret = nouveau_bo_validate(nvbo, true, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_ERROR(cli, "fail ttm_validate\n");
                        return ret;
                }

                ret = validate_sync(chan, nvbo);
                if (unlikely(ret)) {
                        NV_ERROR(cli, "fail post-validate sync\n");
                        return ret;
                }

                if (nv_device(drm->device)->card_type < NV_50) {
                        if (nvbo->bo.offset == b->presumed.offset &&
                            ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                             (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                                continue;

                        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                        else
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                        b->presumed.offset = nvbo->bo.offset;
                        b->presumed.valid = 0;
                        relocs++;

                        if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
                                             &b->presumed, sizeof(b->presumed)))
                                return -EFAULT;
                }
        }

        return relocs;
}

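/*
 * Reserve and validate all buffers referenced by a pushbuf submission,
 * accumulating the number of entries that need their relocations applied.
 */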
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             uint64_t user_buffers, int nr_buffers,
                             struct validate_op *op, int *apply_relocs)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int ret, relocs = 0;

        INIT_LIST_HEAD(&op->vram_list);
        INIT_LIST_HEAD(&op->gart_list);
        INIT_LIST_HEAD(&op->both_list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(cli, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(cli, "validate vram_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(cli, "validate gart_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(cli, "validate both_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        *apply_relocs = relocs;
        return 0;
}

static inline void
u_free(void *addr)
{
        if (!is_vmalloc_addr(addr))
                kfree(addr);
        else
                vfree(addr);
}

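/*
 * Copy an array of nmemb elements of the given size in from userspace,
 * trying kmalloc() first and falling back to vmalloc() for large arrays.
 * The result must be released with u_free().
 */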
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        size *= nmemb;

        mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!mem)
                mem = vmalloc(size);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (DRM_COPY_FROM_USER(mem, userptr, size)) {
                u_free(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

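/*
 * Apply userspace-supplied relocations: for every buffer whose presumed
 * offset turned out to be stale, patch the referenced dwords inside the
 * containing buffer with the real GPU offset (low/high half, optionally
 * OR'd with a domain-dependent value), waiting for the buffer to be idle
 * before writing through the CPU mapping.
 */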
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        int ret = 0;
        unsigned i;

        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
        if (IS_ERR(reloc))
                return PTR_ERR(reloc);

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_ERROR(cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_ERROR(cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_ERROR(cli, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_ERROR(cli, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                spin_lock(&nvbo->bo.bdev->fence_lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, false);
                spin_unlock(&nvbo->bo.bdev->fence_lock);
                if (ret) {
                        NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        u_free(reloc);
        return ret;
}

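/*
 * Main command submission ioctl.  The flow is: look up the target channel,
 * copy in the push/buffer arrays, reserve and validate every buffer, apply
 * relocations if any presumed offsets were stale, then emit the push
 * buffers to the channel (via the IB ring when available, a call method on
 * chipsets >= 0x25, or a jump on older chips) and fence the submission so
 * the buffers stay busy until the GPU has consumed it.
 */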
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_abi16_chan *temp;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan = NULL;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0, do_reloc = 0;

        if (unlikely(!abi16))
                return -ENOMEM;

        list_for_each_entry(temp, &abi16->channels, head) {
                if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
                        chan = temp->chan;
                        break;
                }
        }

        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);

        req->vram_available = drm->gem.vram_available;
        req->gart_available = drm->gem.gart_available;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push))
                return nouveau_abi16_put(abi16, PTR_ERR(push));

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                u_free(push);
                return nouveau_abi16_put(abi16, PTR_ERR(bo));
        }

        /* Ensure all push buffers are on validate list */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_ERROR(cli, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }
        }

        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(cli, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
                if (ret) {
                        NV_ERROR(cli, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
                if (ret) {
                        NV_ERROR(cli, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, nvbo, push[i].offset,
                                      push[i].length);
                }
        } else
        if (nv_device(drm->device)->chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_ERROR(cli, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
                        OUT_RING(chan, 0);
                }
        } else {
                ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_ERROR(cli, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        uint32_t cmd;

                        cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        OUT_RING(chan, 0x20000000 |
                                      (nvbo->bo.offset + push[i].offset));
                        OUT_RING(chan, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                OUT_RING(chan, 0);
                }
        }

        ret = nouveau_fence_new(chan, false, &fence);
        if (ret) {
                NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

out:
        validate_fini(&op, fence);
        nouveau_fence_unref(&fence);

out_prevalid:
        u_free(bo);
        u_free(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (nv_device(drm->device)->chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        return nouveau_abi16_put(abi16, ret);
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
        uint32_t flags = 0;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;

        return flags;
}

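/*
 * CPU_PREP ioctl: wait (or just poll, with the NOWAIT flag) for any
 * outstanding GPU work on the buffer to finish before userspace accesses
 * it from the CPU.
 */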
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        int ret = -EINVAL;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
        spin_unlock(&nvbo->bo.bdev->fence_lock);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(file_priv, gem, req);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}