linux/drivers/gpu/drm/nouveau/nouveau_gem.c
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_gem_ttm_helper.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>

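/* Page-fault handler for mmap()ed GEM objects.  Reserves the BO, gives the
 * driver a chance to migrate it somewhere CPU-accessible, temporarily drops
 * it from the io_reserve LRU while the PTEs are set up, and then unlocks the
 * reservation unless TTM asked us to retry with the locks already dropped.
 */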
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        pgprot_t prot;
        vm_fault_t ret;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        ret = nouveau_ttm_fault_reserve_notify(bo);
        if (ret)
                goto error_unlock;

        nouveau_bo_del_io_reserve_lru(bo);
        prot = vm_get_page_prot(vma->vm_flags);
        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
        nouveau_bo_add_io_reserve_lru(bo);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

error_unlock:
        dma_resv_unlock(bo->base.resv);
        return ret;
}

static const struct vm_operations_struct nouveau_ttm_vm_ops = {
        .fault = nouveau_ttm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access
};

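/* Final unreference of a GEM object: wakes the device (teardown may need to
 * touch VRAM), detaches any dma-buf import and drops the underlying TTM
 * buffer object.
 */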
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (WARN_ON(ret < 0 && ret != -EACCES)) {
                pm_runtime_put_autosuspend(dev);
                return;
        }

        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);

        ttm_bo_put(&nvbo->bo);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}

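/* Called when a client obtains a handle to the object.  On NV50 and later
 * (per-client virtual memory) this creates, or takes another reference on,
 * the mapping of the buffer into that client's VMM; earlier chips share a
 * single address space and need nothing here.
 */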
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
        struct nouveau_vma *vma;
        int ret;

        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return 0;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0 && ret != -EACCES) {
                pm_runtime_put_autosuspend(dev);
                goto out;
        }

        ret = nouveau_vma_new(nvbo, vmm, &vma);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
out:
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

struct nouveau_gem_object_unmap {
        struct nouveau_cli_work work;
        struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
        nouveau_fence_unref(&vma->fence);
        nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
        struct nouveau_gem_object_unmap *work =
                container_of(w, typeof(*work), work);
        nouveau_gem_object_delete(work->vma);
        kfree(work);
}

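/* Tear down a per-client mapping.  If the GPU may still be using it (a fence
 * is attached), the actual unmap is deferred to client workqueue context once
 * the fence signals; otherwise, or if the work item cannot be allocated, the
 * mapping is destroyed immediately (waiting on the fence if need be).
 */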
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
        struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
        struct nouveau_gem_object_unmap *work;

        list_del_init(&vma->head);

        if (!fence) {
                nouveau_gem_object_delete(vma);
                return;
        }

        if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
                WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
                nouveau_gem_object_delete(vma);
                return;
        }

        work->work.func = nouveau_gem_object_delete_work;
        work->vma = vma;
        nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

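/* Called when a client drops its handle.  Drops the client's reference on the
 * VMA and, when it was the last one, wakes the device and unmaps the buffer
 * from the client's address space.
 */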
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
        struct nouveau_vma *vma;
        int ret;

        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return;

        vma = nouveau_vma_find(nvbo, vmm);
        if (vma) {
                if (--vma->refs == 0) {
                        ret = pm_runtime_get_sync(dev);
                        if (!WARN_ON(ret < 0 && ret != -EACCES)) {
                                nouveau_gem_object_unmap(nvbo, vma);
                                pm_runtime_mark_last_busy(dev);
                        }
                        pm_runtime_put_autosuspend(dev);
                }
        }
        ttm_bo_unreserve(&nvbo->bo);
}

const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
        .free = nouveau_gem_object_del,
        .open = nouveau_gem_object_open,
        .close = nouveau_gem_object_close,
        .pin = nouveau_gem_prime_pin,
        .unpin = nouveau_gem_prime_unpin,
        .get_sg_table = nouveau_gem_prime_get_sg_table,
        .vmap = drm_gem_ttm_vmap,
        .vunmap = drm_gem_ttm_vunmap,
        .mmap = drm_gem_ttm_mmap,
        .vm_ops = &nouveau_ttm_vm_ops,
};

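/* Allocate a new nouveau_bo with an embedded GEM object.  The caller gets a
 * single GEM reference; on nv50+ the set of domains the buffer may ever live
 * in is locked down to what was requested at creation time.
 */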
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = cli->drm;
        struct nouveau_bo *nvbo;
        int ret;

        if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
                domain |= NOUVEAU_GEM_DOMAIN_CPU;

        nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
                                tile_flags);
        if (IS_ERR(nvbo))
                return PTR_ERR(nvbo);

        nvbo->bo.base.funcs = &nouveau_gem_object_funcs;

        /* Initialize the embedded gem-object. We return a single gem-reference
         * to the caller, instead of a normal nouveau_bo ttm reference. */
        ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
        if (ret) {
                drm_gem_object_release(&nvbo->bo.base);
                kfree(nvbo);
                return ret;
        }

        ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
        if (ret)
                return ret;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                nvbo->valid_domains &= domain;

        *pnvbo = nvbo;
        return 0;
}

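/* Fill in the drm_nouveau_gem_info reply for a GEM object: domain, GPU
 * virtual address (per-client VMA on nv50+, linear offset otherwise), size,
 * mmap offset and the chipset-specific tiling/kind/compression flags.
 */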
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
                 struct drm_nouveau_gem_info *rep)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
        struct nouveau_vma *vma;

        if (is_power_of_2(nvbo->valid_domains))
                rep->domain = nvbo->valid_domains;
        else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
        rep->offset = nvbo->offset;
        if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                vma = nouveau_vma_find(nvbo, vmm);
                if (!vma)
                        return -EINVAL;

                rep->offset = vma->addr;
        }

        rep->size = nvbo->bo.base.size;
        rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
        rep->tile_mode = nvbo->mode;
        rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
                rep->tile_flags |= nvbo->kind << 8;
        else
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
        else
                rep->tile_flags |= nvbo->zeta;
        return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        ret = nouveau_gem_new(cli, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
                                    &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(&nvbo->bo.base);
        return ret;
}

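/* Work out where a buffer should be placed for this submission.  The
 * intersection of the buffer's valid domains and the domains the client asked
 * for must be non-empty; prefer wherever the buffer already resides to avoid
 * a needless migration, otherwise prefer VRAM over GART.
 */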
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_domains = 0;

        if (!domains)
                return -EINVAL;

        valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->resource->mem_type == TTM_PL_VRAM)
                pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->resource->mem_type == TTM_PL_TT)
                pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

        else
                pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

        nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

        return 0;
}

struct validate_op {
        struct list_head list;
        struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
                        struct nouveau_fence *fence,
                        struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        struct nouveau_bo *nvbo;
        struct drm_nouveau_gem_pushbuf_bo *b;

        while (!list_empty(&op->list)) {
                nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
                b = &pbbo[nvbo->pbbo_index];

                if (likely(fence)) {
                        nouveau_bo_fence(nvbo, fence, !!b->write_domains);

                        if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                                struct nouveau_vma *vma =
                                        (void *)(unsigned long)b->user_priv;
                                nouveau_fence_unref(&vma->fence);
                                dma_fence_get(&fence->base);
                                vma->fence = fence;
                        }
                }

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_put(&nvbo->bo.base);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
              struct nouveau_fence *fence,
              struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        validate_fini_no_ticket(op, chan, fence, pbbo);
        ww_acquire_fini(&op->ticket);
}

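/* Look up every buffer on the pushbuf's validation list and reserve it under
 * a single ww_acquire ticket.  On -EDEADLK the already-reserved buffers are
 * released and the contended one is re-acquired via the slowpath before the
 * whole list is retried, which is how the wait/wound protocol avoids
 * deadlocking against other clients.
 */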
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int trycnt = 0;
        int ret = -EINVAL, i;
        struct nouveau_bo *res_bo = NULL;
        LIST_HEAD(gart_list);
        LIST_HEAD(vram_list);
        LIST_HEAD(both_list);

        ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
        if (++trycnt > 100000) {
                NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(file_priv, b->handle);
                if (!gem) {
                        NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
                        ret = -ENOENT;
                        break;
                }
                nvbo = nouveau_gem_object(gem);
                if (nvbo == res_bo) {
                        res_bo = NULL;
                        drm_gem_object_put(gem);
                        continue;
                }

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_PRINTK(err, cli, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        drm_gem_object_put(gem);
                        ret = -EINVAL;
                        break;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
                if (ret) {
                        list_splice_tail_init(&vram_list, &op->list);
                        list_splice_tail_init(&gart_list, &op->list);
                        list_splice_tail_init(&both_list, &op->list);
                        validate_fini_no_ticket(op, chan, NULL, NULL);
                        if (unlikely(ret == -EDEADLK)) {
                                ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                                              &op->ticket);
                                if (!ret)
                                        res_bo = nvbo;
                        }
                        if (unlikely(ret)) {
                                if (ret != -ERESTARTSYS)
                                        NV_PRINTK(err, cli, "fail reserve\n");
                                break;
                        }
                }

                if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                        struct nouveau_vmm *vmm = chan->vmm;
                        struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
                        if (!vma) {
                                NV_PRINTK(err, cli, "vma not found!\n");
                                ret = -EINVAL;
                                break;
                        }

                        b->user_priv = (uint64_t)(unsigned long)vma;
                } else {
                        b->user_priv = (uint64_t)(unsigned long)nvbo;
                }

                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &gart_list);
                else {
                        NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &both_list);
                        ret = -EINVAL;
                        break;
                }
                if (nvbo == res_bo)
                        goto retry;
        }

        ww_acquire_done(&op->ticket);
        list_splice_tail(&vram_list, &op->list);
        list_splice_tail(&gart_list, &op->list);
        list_splice_tail(&both_list, &op->list);
        if (ret)
                validate_fini(op, chan, NULL, NULL);
        return ret;
}

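/* For each reserved buffer: pick a placement, let TTM migrate it if needed,
 * synchronise the channel against any fences already attached to it, and on
 * pre-NV50 chips record any offset/domain change so the ioctl can patch the
 * client's presumed values.  Returns the number of such changes.
 */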
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
              struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        struct nouveau_drm *drm = chan->drm;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_PRINTK(err, cli, "fail set_domain\n");
                        return ret;
                }

                ret = nouveau_bo_validate(nvbo, true, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail ttm_validate\n");
                        return ret;
                }

                ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail post-validate sync\n");
                        return ret;
                }

                if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                        if (nvbo->offset == b->presumed.offset &&
                            ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                             (nvbo->bo.resource->mem_type == TTM_PL_TT &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                                continue;

                        if (nvbo->bo.resource->mem_type == TTM_PL_TT)
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                        else
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                        b->presumed.offset = nvbo->offset;
                        b->presumed.valid = 0;
                        relocs++;
                }
        }

        return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             int nr_buffers,
                             struct validate_op *op, bool *apply_relocs)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int ret;

        INIT_LIST_HEAD(&op->list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, cli, &op->list, pbbo);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validating bo list\n");
                validate_fini(op, chan, NULL, NULL);
                return ret;
        } else if (ret > 0) {
                *apply_relocs = true;
        }

        return 0;
}

static inline void
u_free(void *addr)
{
        kvfree(addr);
}

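/* Copy an array of nmemb elements of 'size' bytes each from userspace into a
 * kvmalloc()ed kernel buffer.  Callers bound nmemb against the
 * NOUVEAU_GEM_MAX_* limits before calling, which keeps the size calculation
 * from overflowing.  Free the result with u_free().
 */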
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        size *= nmemb;

        mem = kvmalloc(size, GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(mem, userptr, size)) {
                u_free(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

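/* Apply userspace relocations: for every entry, compute the low/high half of
 * the buffer's real address (optionally OR'ing in a domain-dependent value),
 * wait for the target buffer to go idle, and patch the word at the given
 * offset through a kernel mapping of the buffer.
 */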
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_reloc *reloc,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        int ret = 0;
        unsigned i;

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.base.size)) {
                        NV_PRINTK(err, cli, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_PRINTK(err, cli, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                ret = ttm_bo_wait(&nvbo->bo, false, false);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        return ret;
}

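/* Main command-submission ioctl.  Copies the push/buffer/reloc arrays from
 * userspace, validates and fences every buffer, applies relocations when a
 * buffer moved, then queues the push buffers on the channel - via the IB
 * ring on nv50+, CALL on >=nv25, or a JUMP with a patched return on earlier
 * chips - and finally emits a fence (waiting on it if SYNC was requested).
 */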
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_abi16_chan *temp;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan = NULL;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0;
        bool do_reloc = false, sync = false;

        if (unlikely(!abi16))
                return -ENOMEM;

        list_for_each_entry(temp, &abi16->channels, head) {
                if (temp->chan->chid == req->channel) {
                        chan = temp->chan;
                        break;
                }
        }

        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);
        if (unlikely(atomic_read(&chan->killed)))
                return nouveau_abi16_put(abi16, -ENODEV);

        sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

        req->vram_available = drm->gem.vram_available;
        req->gart_available = drm->gem.gart_available;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push))
                return nouveau_abi16_put(abi16, PTR_ERR(push));

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                u_free(push);
                return nouveau_abi16_put(abi16, PTR_ERR(bo));
        }

        /* Ensure all push buffers are on validate list */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }
        }

        /* Validate buffer list */
revalidate:
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                if (!reloc) {
                        validate_fini(&op, chan, NULL, bo);
                        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
                        if (IS_ERR(reloc)) {
                                ret = PTR_ERR(reloc);
                                goto out_prevalid;
                        }

                        goto revalidate;
                }

                ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
                if (ret) {
                        NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_vma *vma = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, vma->addr + push[i].offset,
                                      push[i].length);
                }
        } else
        if (drm->client.device.info.chipset >= 0x25) {
                ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
                if (ret) {
                        NV_PRINTK(err, cli, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
                        PUSH_DATA(chan->chan.push, 0);
                }
        } else {
                ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        uint32_t cmd;

                        cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.resource->
                                                          num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
                        PUSH_DATA(chan->chan.push, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                PUSH_DATA(chan->chan.push, 0);
                }
        }

        ret = nouveau_fence_new(chan, false, &fence);
        if (ret) {
                NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

        if (sync) {
                if (!(ret = nouveau_fence_wait(fence, false, false))) {
                        if ((ret = dma_fence_get_status(&fence->base)) == 1)
                                ret = 0;
                }
        }

out:
        validate_fini(&op, chan, fence, bo);
        nouveau_fence_unref(&fence);

        if (do_reloc) {
                struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                        u64_to_user_ptr(req->buffers);

                for (i = 0; i < req->nr_buffers; i++) {
                        if (bo[i].presumed.valid)
                                continue;

                        if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
                                         sizeof(bo[i].presumed))) {
                                ret = -EFAULT;
                                break;
                        }
                }
        }
out_prevalid:
        if (!IS_ERR(reloc))
                u_free(reloc);
        u_free(bo);
        u_free(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (drm->client.device.info.chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->push.addr + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        return nouveau_abi16_put(abi16, ret);
}

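/* CPU_PREP ioctl: wait (or just poll, with NOWAIT) for the buffer's
 * reservation fences to signal - all fences for a write, writers only for a
 * read - and then sync the backing pages for CPU access.
 */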
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
        long lret;
        int ret;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
                                     no_wait ? 0 : 30 * HZ);
        if (!lret)
                ret = -EBUSY;
        else if (lret > 0)
                ret = 0;
        else
                ret = lret;

        nouveau_bo_sync_for_cpu(nvbo);
        drm_gem_object_put(gem);

        return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_fini *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        nouveau_bo_sync_for_device(nvbo);
        drm_gem_object_put(gem);
        return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(file_priv, gem, req);
        drm_gem_object_put(gem);
        return ret;
}