linux/drivers/gpu/drm/nouveau/nouveau_gem.c
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

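/* No-op stub; nothing in this file uses it. */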
#define nouveau_gem_pushbuf_sync(chan) 0

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
        return 0;
}

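/* Final teardown of a GEM object: drop any leftover pin, release the
 * backing TTM buffer object and free the GEM object itself.
 */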
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo;

        if (!nvbo)
                return;
        bo = &nvbo->bo;
        nvbo->gem = NULL;

        if (unlikely(nvbo->pin_refcnt)) {
                nvbo->pin_refcnt = 1;
                nouveau_bo_unpin(nvbo);
        }

        ttm_bo_unref(&bo);

        drm_gem_object_release(gem);
        kfree(gem);
}

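/* Allocate a TTM buffer object for the requested placement and wrap it
 * in a GEM object so userspace can get a handle to it.
 */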
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
                int size, int align, uint32_t domain, uint32_t tile_mode,
                uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;
        if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
                flags |= TTM_PL_FLAG_SYSTEM;

        ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
                             tile_flags, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (dev_priv->card_type >= NV_50)
                nvbo->valid_domains &= domain;

        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
        nvbo->gem->driver_private = nvbo;
        return 0;
}

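/* Fill in the userspace-visible info (size, placement, offsets) for a
 * buffer object.
 */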
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);

        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->offset = nvbo->bo.offset;
        rep->map_handle = nvbo->bo.addr_space_offset;
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
}

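/* GEM_NEW ioctl: allocate a buffer object and return a handle plus its
 * placement information to userspace.
 */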
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        struct nouveau_channel *chan = NULL;
        int ret = 0;

        if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
                dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

        if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
                NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
                return -EINVAL;
        }

        if (req->channel_hint) {
                chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
                if (IS_ERR(chan))
                        return PTR_ERR(chan);
        }

        ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (chan)
                nouveau_channel_put(&chan);
        if (ret)
                return ret;

        ret = nouveau_gem_info(nvbo->gem, &req->info);
        if (ret)
                goto out;

        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(nvbo->gem);
out:
        return ret;
}

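/* Derive TTM placement flags from the domains userspace asked for,
 * preferring to leave the buffer wherever it currently resides.
 */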
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
                return -EINVAL;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                valid_flags |= TTM_PL_FLAG_VRAM;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                valid_flags |= TTM_PL_FLAG_TT;

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
                pref_flags |= TTM_PL_FLAG_TT;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else
                pref_flags |= TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

        return 0;
}

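/* Buffers reserved for a pushbuf submission, bucketed by which
 * placements they are allowed to use.
 */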
struct validate_op {
        struct list_head vram_list;
        struct list_head gart_list;
        struct list_head both_list;
};

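/* Attach the submission fence to every buffer on the list, then
 * unreserve the buffers and drop the references taken by validate_init().
 */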
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
        struct list_head *entry, *tmp;
        struct nouveau_bo *nvbo;

        list_for_each_safe(entry, tmp, list) {
                nvbo = list_entry(entry, struct nouveau_bo, entry);

                nouveau_bo_fence(nvbo, fence);

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_unreference_unlocked(nvbo->gem);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
        validate_fini_list(&op->vram_list, fence);
        validate_fini_list(&op->gart_list, fence);
        validate_fini_list(&op->both_list, fence);
}

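/* Look up and reserve every buffer on the pushbuf bo list.  Reservation
 * uses a global sequence number so contention with another client backs
 * off and retries rather than deadlocking.
 */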
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t sequence;
        int trycnt = 0;
        int ret, i;

        sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
        if (++trycnt > 100000) {
                NV_ERROR(dev, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(dev, file_priv, b->handle);
                if (!gem) {
                        NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
                        validate_fini(op, NULL);
                        return -ENOENT;
                }
                nvbo = gem->driver_private;

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_ERROR(dev, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
                if (ret) {
                        validate_fini(op, NULL);
                        if (unlikely(ret == -EAGAIN))
                                ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
                        drm_gem_object_unreference_unlocked(gem);
                        if (unlikely(ret)) {
                                if (ret != -ERESTARTSYS)
                                        NV_ERROR(dev, "fail reserve\n");
                                return ret;
                        }
                        goto retry;
                }

                b->user_priv = (uint64_t)(unsigned long)nvbo;
                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &op->both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &op->vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &op->gart_list);
                else {
                        NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &op->both_list);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }
        }

        return 0;
}

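/* Validate every buffer on one placement list into an acceptable
 * placement, synchronise it with the target channel, and copy any
 * changed presumed offsets back to userspace so relocations can be
 * applied.  Returns the number of entries needing relocation.
 */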
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
              struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                                (void __force __user *)(uintptr_t)user_pbbo_ptr;
        struct drm_device *dev = chan->dev;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail pre-validate sync\n");
                        return ret;
                }

                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail set_domain\n");
                        return ret;
                }

                nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
                ret = nouveau_bo_validate(nvbo, true, false, false);
                nvbo->channel = NULL;
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_ERROR(dev, "fail ttm_validate\n");
                        return ret;
                }

                ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail post-validate sync\n");
                        return ret;
                }

                if (nvbo->bo.offset == b->presumed.offset &&
                    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                        continue;

                if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                        b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                else
                        b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                b->presumed.offset = nvbo->bo.offset;
                b->presumed.valid = 0;
                relocs++;

                if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
                                     &b->presumed, sizeof(b->presumed)))
                        return -EFAULT;
        }

        return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             uint64_t user_buffers, int nr_buffers,
                             struct validate_op *op, int *apply_relocs)
{
        struct drm_device *dev = chan->dev;
        int ret, relocs = 0;

        INIT_LIST_HEAD(&op->vram_list);
        INIT_LIST_HEAD(&op->gart_list);
        INIT_LIST_HEAD(&op->both_list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate vram_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate gart_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate both_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        *apply_relocs = relocs;
        return 0;
}

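/* Copy an array of nmemb elements of the given size in from userspace
 * into a kmalloc'd buffer.  Caller frees with kfree().
 */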
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        mem = kmalloc(nmemb * size, GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
                kfree(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

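/* Apply userspace-supplied relocations: patch buffer offsets into the
 * push buffers for every entry whose presumed offset was stale.
 */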
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        int ret = 0;
        unsigned i;

        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
        if (IS_ERR(reloc))
                return PTR_ERR(reloc);

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_ERROR(dev, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_ERROR(dev, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_ERROR(dev, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_ERROR(dev, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                spin_lock(&nvbo->bo.bdev->fence_lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, false);
                spin_unlock(&nvbo->bo.bdev->fence_lock);
                if (ret) {
                        NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        kfree(reloc);
        return ret;
}

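/* GEM_PUSHBUF ioctl: copy in the push/buffer/reloc lists, validate and
 * fence all buffers, apply relocations, then submit the push buffers to
 * the channel.
 */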
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0, do_reloc = 0;

        chan = nouveau_channel_get(dev, file_priv, req->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        req->vram_available = dev_priv->fb_aper_free;
        req->gart_available = dev_priv->gart_info.aper_free;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                nouveau_channel_put(&chan);
                return -EINVAL;
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                nouveau_channel_put(&chan);
                return -EINVAL;
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                nouveau_channel_put(&chan);
                return -EINVAL;
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push)) {
                nouveau_channel_put(&chan);
                return PTR_ERR(push);
        }

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                kfree(push);
                nouveau_channel_put(&chan);
                return PTR_ERR(bo);
        }

        /* Mark push buffers as being used on PFIFO; the validation code
         * will then make sure that if a pushbuf bo has to move, the move
         * happens on the kernel channel, which in turn forces a sync
         * before we try to submit the push buffer.
         */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_ERROR(dev, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }

                bo[push[i].bo_index].read_domains |= (1 << 31);
        }

        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
                if (ret) {
                        NV_ERROR(dev, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

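        /* Submit the push buffers: channels with an IB ring use
         * nv50_dma_push(), nv25+ chips use a subroutine call, and older
         * chips get a jump patched into the end of each push buffer.
         */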
        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
                if (ret) {
                        NV_INFO(dev, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, nvbo, push[i].offset,
                                      push[i].length);
                }
        } else
        if (dev_priv->chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_ERROR(dev, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

                        OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
                                        push[i].offset) | 2);
                        OUT_RING(chan, 0);
                }
        } else {
                ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_ERROR(dev, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
                        uint32_t cmd;

                        cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.
                                                          num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
                                        push[i].offset) | 0x20000000);
                        OUT_RING(chan, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                OUT_RING(chan, 0);
                }
        }

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret) {
                NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

out:
        validate_fini(&op, fence);
        nouveau_fence_unref(&fence);

out_prevalid:
        kfree(bo);
        kfree(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (dev_priv->chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        nouveau_channel_put(&chan);
        return ret;
}

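/* Translate GEM domain flags into the corresponding TTM placement flags. */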
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
        uint32_t flags = 0;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;

        return flags;
}

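/* GEM_CPU_PREP ioctl: wait for the GPU to be finished with a buffer
 * (or just poll, with NOWAIT) before the CPU touches it.
 */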
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        int ret = -EINVAL;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
        spin_unlock(&nvbo->bo.bdev->fence_lock);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}

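/* GEM_CPU_FINI ioctl: nothing to do here. */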
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        return 0;
}

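/* GEM_INFO ioctl: look up a handle and report the buffer's size,
 * placement and map handle back to userspace.
 */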
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(gem, req);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}