linux/drivers/gpu/drm/nouveau/nouveau_object.c
/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"

struct nouveau_gpuobj_method {
        struct list_head head;
        u32 mthd;
        int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
        struct list_head head;
        struct list_head methods;
        u32 id;
        u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_class *oc;

        oc = kzalloc(sizeof(*oc), GFP_KERNEL);
        if (!oc)
                return -ENOMEM;

        INIT_LIST_HEAD(&oc->methods);
        oc->id = class;
        oc->engine = engine;
        list_add(&oc->head, &dev_priv->classes);
        return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
                        int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_method *om;
        struct nouveau_gpuobj_class *oc;

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id == class)
                        goto found;
        }

        return -EINVAL;

found:
        om = kzalloc(sizeof(*om), GFP_KERNEL);
        if (!om)
                return -ENOMEM;

        om->mthd = mthd;
        om->exec = exec;
        list_add(&om->head, &oc->methods);
        return 0;
}

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
                         u32 class, u32 mthd, u32 data)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_gpuobj_method *om;
        struct nouveau_gpuobj_class *oc;

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id != class)
                        continue;

                list_for_each_entry(om, &oc->methods, head) {
                        if (om->mthd == mthd)
                                return om->exec(chan, class, mthd, data);
                }
        }

        return -ENOENT;
}
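
/*
 * Illustrative sketch only, not part of the original file: how an
 * engine might register a class and attach a software method handler
 * to it. The class/method numbers (0x006e/0x0150) and both function
 * names are hypothetical.
 */
static int
example_mthd_exec(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
        return 0; /* method consumed, nothing else to do */
}

static int __maybe_unused
example_class_register(struct drm_device *dev)
{
        int ret;

        ret = nouveau_gpuobj_class_new(dev, 0x006e, NVOBJ_ENGINE_SW);
        if (ret)
                return ret;

        /* handlers registered this way can later be resolved through
         * nouveau_gpuobj_mthd_call() above */
        return nouveau_gpuobj_mthd_new(dev, 0x006e, 0x0150,
                                       example_mthd_exec);
}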

int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
                          u32 class, u32 mthd, u32 data)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = NULL;
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        if (chid > 0 && chid < dev_priv->engine.fifo.channels)
                chan = dev_priv->channels.ptr[chid];
        if (chan)
                ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return ret;
}

/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user-defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32s. The first CARD32 contains
   the handle, the second one a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0  instance_addr >> 4
   17:16  engine (here uses 1 = graphics)
   28:24  channel id (here uses 0)
   31     valid (use 1)

   NV40:

   15: 0  instance_addr >> 4   (maybe 19-0)
   21:20  engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id;
   a sketch of the computation follows below.
*/
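
/*
 * Illustrative sketch only, not part of the original file: the lookup
 * key is computed by folding the handle onto itself in ramht->bits
 * sized chunks and, on pre-NV50 chipsets, XORing in the channel id.
 * The in-tree implementation lives in nouveau_ramht.c.
 */
static inline u32
example_ramht_hash(struct nouveau_channel *chan, u32 handle)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        u32 bits = chan->ramht->bits;
        u32 hash = 0;

        while (handle) {
                hash ^= (handle & ((1 << bits) - 1));
                handle >>= bits;
        }

        if (dev_priv->card_type < NV_50)
                hash ^= chan->id << (bits - 4);

        return hash << 3; /* two CARD32s, i.e. 8 bytes, per entry */
}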

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
                   uint32_t size, int align, uint32_t flags,
                   struct nouveau_gpuobj **gpuobj_ret)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        struct nouveau_gpuobj *gpuobj;
        struct drm_mm_node *ramin = NULL;
        int ret, i;

        NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
                 chan ? chan->id : -1, size, align, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size = size;

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);

        if (chan) {
                ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
                if (ramin)
                        ramin = drm_mm_get_block(ramin, size, align);
                if (!ramin) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return -ENOMEM;
                }

                gpuobj->pinst = chan->ramin->pinst;
                if (gpuobj->pinst != ~0)
                        gpuobj->pinst += ramin->start;

                gpuobj->cinst = ramin->start;
                gpuobj->vinst = ramin->start + chan->ramin->vinst;
                gpuobj->node  = ramin;
        } else {
                ret = instmem->get(gpuobj, size, align);
                if (ret) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return ret;
                }

                ret = -ENOSYS;
                if (!(flags & NVOBJ_FLAG_DONT_MAP))
                        ret = instmem->map(gpuobj);
                if (ret)
                        gpuobj->pinst = ~0;

                gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        }

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                instmem->flush(dev);
        }

        *gpuobj_ret = gpuobj;
        return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_DEBUG(dev, "\n");

        INIT_LIST_HEAD(&dev_priv->gpuobj_list);
        INIT_LIST_HEAD(&dev_priv->classes);
        spin_lock_init(&dev_priv->ramin_lock);
        dev_priv->ramin_base = ~0;

        return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_method *om, *tm;
        struct nouveau_gpuobj_class *oc, *tc;

        NV_DEBUG(dev, "\n");

        list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
                list_for_each_entry_safe(om, tm, &oc->methods, head) {
                        list_del(&om->head);
                        kfree(om);
                }
                list_del(&oc->head);
                kfree(oc);
        }

        BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
        struct nouveau_gpuobj *gpuobj =
                container_of(ref, struct nouveau_gpuobj, refcount);
        struct drm_device *dev = gpuobj->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        int i;

        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

        if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                instmem->flush(dev);
        }

        if (gpuobj->dtor)
                gpuobj->dtor(dev, gpuobj);

        if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
                if (gpuobj->node) {
                        instmem->unmap(gpuobj);
                        instmem->put(gpuobj);
                }
        } else {
                if (gpuobj->node) {
                        spin_lock(&dev_priv->ramin_lock);
                        drm_mm_put_block(gpuobj->node);
                        spin_unlock(&dev_priv->ramin_lock);
                }
        }

        spin_lock(&dev_priv->ramin_lock);
        list_del(&gpuobj->list);
        spin_unlock(&dev_priv->ramin_lock);

        kfree(gpuobj);
}

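/*
 * Reference-counted pointer assignment: takes a reference on 'ref' (if
 * any), drops the reference held via '*ptr' (if any), then stores 'ref'
 * in '*ptr'. Passing NULL as 'ref' is how objects are released.
 */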
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
        if (ref)
                kref_get(&ref->refcount);

        if (*ptr)
                kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

        *ptr = ref;
}

int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
                        u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        int i;

        NV_DEBUG(dev,
                 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
                 pinst, vinst, size, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size  = size;
        gpuobj->pinst = pinst;
        gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        gpuobj->vinst = vinst;

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                dev_priv->engine.instmem.flush(dev);
        }

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);
        *pgpuobj = gpuobj;
        return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*XXX: dodgy hack for now */
        if (dev_priv->card_type >= NV_50)
                return 24;
        if (dev_priv->card_type >= NV_40)
                return 32;
        return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; nvidia uses the same value as the first pte,
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The functions below create DMA objects in instance RAM and return
   handles to them that can be used to set up context objects.
*/
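
/*
 * Worked example (hypothetical address, added for illustration): for
 * base = 0x12345678 on a pre-NV50 ctxdma, the low 12 bits become the
 * "dma adjust" field of entry[0] (0x678 << 20) and the page-aligned
 * remainder forms the first pte (0x12345000, plus the readwrite bit),
 * matching the packing in nouveau_gpuobj_dma_new() below.
 */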

void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
                     u64 base, u64 size, int target, int access,
                     u32 type, u32 comp)
{
        struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        u32 flags0;

        flags0  = (comp << 29) | (type << 22) | class;
        flags0 |= 0x00100000;

        switch (access) {
        case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
        case NV_MEM_ACCESS_RW:
        case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
        default:
                break;
        }

        switch (target) {
        case NV_MEM_TARGET_VRAM:
                flags0 |= 0x00010000;
                break;
        case NV_MEM_TARGET_PCI:
                flags0 |= 0x00020000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
                flags0 |= 0x00030000;
                break;
        case NV_MEM_TARGET_GART:
                base += dev_priv->gart_info.aper_base;
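                /* fall through */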
        default:
                flags0 &= ~0x00100000;
                break;
        }

        /* convert to base + limit */
        size = (base + size) - 1;

        nv_wo32(obj, offset + 0x00, flags0);
        nv_wo32(obj, offset + 0x04, lower_32_bits(size));
        nv_wo32(obj, offset + 0x08, lower_32_bits(base));
        nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
                                    upper_32_bits(base));
        nv_wo32(obj, offset + 0x10, 0x00000000);
        nv_wo32(obj, offset + 0x14, 0x00000000);

        pinstmem->flush(obj->dev);
}

int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
                    int target, int access, u32 type, u32 comp,
                    struct nouveau_gpuobj **pobj)
{
        struct drm_device *dev = chan->dev;
        int ret;

        ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
        if (ret)
                return ret;

        nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
                             access, type, comp);
        return 0;
}

int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
                       u64 size, int access, int target,
                       struct nouveau_gpuobj **pobj)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj *obj;
        u32 flags0, flags2;
        int ret;

        if (dev_priv->card_type >= NV_50) {
                u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
                u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

                return nv50_gpuobj_dma_new(chan, class, base, size,
                                           target, access, type, comp, pobj);
        }

        if (target == NV_MEM_TARGET_GART) {
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        target = NV_MEM_TARGET_PCI_NOSNOOP;
                        base  += dev_priv->gart_info.aper_base;
                } else
                if (base != 0) {
                        base = nouveau_sgdma_get_physical(dev, base);
                        target = NV_MEM_TARGET_PCI;
                } else {
                        nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
                        return 0;
                }
        }

        flags0  = class;
        flags0 |= 0x00003000; /* PT present, PT linear */
        flags2  = 0;

        switch (target) {
        case NV_MEM_TARGET_PCI:
                flags0 |= 0x00020000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
                flags0 |= 0x00030000;
                break;
        default:
                break;
        }

        switch (access) {
        case NV_MEM_ACCESS_RO:
                flags0 |= 0x00004000;
                break;
        case NV_MEM_ACCESS_WO:
                flags0 |= 0x00008000;
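                /* fall through */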
        default:
                flags2 |= 0x00000002;
                break;
        }

        flags0 |= (base & 0x00000fff) << 20;
        flags2 |= (base & 0xfffff000);

        ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
        if (ret)
                return ret;

        nv_wo32(obj, 0x00, flags0);
        nv_wo32(obj, 0x04, size - 1);
        nv_wo32(obj, 0x08, flags2);
        nv_wo32(obj, 0x0c, flags2);

        obj->engine = NVOBJ_ENGINE_SW;
        obj->class  = class;
        *pobj = obj;
        return 0;
}
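
/*
 * Usage sketch (hypothetical size, added for illustration): create a
 * read/write ctxdma covering the first 16MiB of VRAM. A caller would
 * typically publish the object via nouveau_ramht_insert() and then
 * drop its local reference with nouveau_gpuobj_ref(NULL, &obj).
 */
static int __maybe_unused
example_vram_ctxdma(struct nouveau_channel *chan,
                    struct nouveau_gpuobj **pobj)
{
        return nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                      0, 16 * 1024 * 1024,
                                      NV_MEM_ACCESS_RW,
                                      NV_MEM_TARGET_VRAM, pobj);
}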

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and earlier 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0 class
   12   chroma key enable
   13   user clip enable
   14   swizzle enable
   17:15 patch config:
       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18   synchronize enable
   19   endian: 1 big, 0 little
   21:20 dither mode
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class  (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier  (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
                      struct nouveau_gpuobj **gpuobj_ret)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_gpuobj *gpuobj;

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        gpuobj->dev = chan->dev;
        gpuobj->engine = NVOBJ_ENGINE_SW;
        gpuobj->class = class;
        kref_init(&gpuobj->refcount);
        gpuobj->cinst = 0x40;

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);
        *gpuobj_ret = gpuobj;
        return 0;
}

int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj_class *oc;
        struct nouveau_gpuobj *gpuobj;
        int ret;

        NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id == class)
                        goto found;
        }

        NV_ERROR(dev, "illegal object class: 0x%x\n", class);
        return -EINVAL;

found:
        switch (oc->engine) {
        case NVOBJ_ENGINE_SW:
                if (dev_priv->card_type < NV_C0) {
                        ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
                        if (ret)
                                return ret;
                        goto insert;
                }
                break;
        case NVOBJ_ENGINE_GR:
                if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
                    (dev_priv->card_type  < NV_20 && !chan->pgraph_ctx)) {
                        struct nouveau_pgraph_engine *pgraph =
                                &dev_priv->engine.graph;

                        ret = pgraph->create_context(chan);
                        if (ret)
                                return ret;
                }
                break;
        case NVOBJ_ENGINE_CRYPT:
                if (!chan->crypt_ctx) {
                        struct nouveau_crypt_engine *pcrypt =
                                &dev_priv->engine.crypt;

                        ret = pcrypt->create_context(chan);
                        if (ret)
                                return ret;
                }
                break;
        }

        /* we're done if this is fermi */
        if (dev_priv->card_type >= NV_C0)
                return 0;

        ret = nouveau_gpuobj_new(dev, chan,
                                 nouveau_gpuobj_class_instmem_size(dev, class),
                                 16,
                                 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
                                 &gpuobj);
        if (ret) {
                NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
                return ret;
        }

        if (dev_priv->card_type >= NV_50) {
                nv_wo32(gpuobj,  0, class);
                nv_wo32(gpuobj, 20, 0x00010000);
        } else {
                switch (class) {
                case NV_CLASS_NULL:
                        nv_wo32(gpuobj, 0, 0x00001030);
                        nv_wo32(gpuobj, 4, 0xFFFFFFFF);
                        break;
                default:
                        if (dev_priv->card_type >= NV_40) {
                                nv_wo32(gpuobj, 0, class);
#ifdef __BIG_ENDIAN
                                nv_wo32(gpuobj, 8, 0x01000000);
#endif
                        } else {
#ifdef __BIG_ENDIAN
                                nv_wo32(gpuobj, 0, class | 0x00080000);
#else
                                nv_wo32(gpuobj, 0, class);
#endif
                        }
                }
        }
        dev_priv->engine.instmem.flush(dev);

        gpuobj->engine = oc->engine;
        gpuobj->class  = oc->id;

insert:
        ret = nouveau_ramht_insert(chan, handle, gpuobj);
        if (ret)
                NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
        nouveau_gpuobj_ref(NULL, &gpuobj);
        return ret;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t size;
        uint32_t base;
        int ret;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        /* Base amount for object storage (4KiB enough?) */
        size = 0x2000;
        base = 0;

        /* PGRAPH context */
        size += dev_priv->engine.graph.grctx_size;

        if (dev_priv->card_type == NV_50) {
                /* Various fixed table thingos */
                size += 0x1400; /* mostly unknown stuff */
                size += 0x4000; /* vm pd */
                base  = 0x6000;
                /* RAMHT, not sure about setting size yet, 32KiB to be safe */
                size += 0x8000;
                /* RAMFC */
                size += 0x1000;
        }

        ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
                return ret;
        }

        ret = drm_mm_init(&chan->ramin_heap, base, size);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
                nouveau_gpuobj_ref(NULL, &chan->ramin);
                return ret;
        }

        return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                            uint32_t vram_h, uint32_t tt_h)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *vram = NULL, *tt = NULL;
        int ret;

        NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

        if (dev_priv->card_type == NV_C0) {
                struct nouveau_vm *vm = dev_priv->chan_vm;
                struct nouveau_vm_pgd *vpgd;

                ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
                                         &chan->ramin);
                if (ret)
                        return ret;

                nouveau_vm_ref(vm, &chan->vm, NULL);

                vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
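                /* point the channel at the shared page directory and
                 * program the VM limit (0x000000ffffffffff = 40 bits) */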
                nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
                nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
                nv_wo32(chan->ramin, 0x0208, 0xffffffff);
                nv_wo32(chan->ramin, 0x020c, 0x000000ff);
                return 0;
        }

        /* Allocate a chunk of memory for per-channel object storage */
        ret = nouveau_gpuobj_channel_init_pramin(chan);
        if (ret) {
                NV_ERROR(dev, "Error initialising channel PRAMIN: %d\n", ret);
                return ret;
        }

        /* NV50 VM
         *  - Allocate per-channel page-directory
         *  - Link with shared channel VM
         */
        if (dev_priv->chan_vm) {
                u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
                u64 vm_vinst = chan->ramin->vinst + pgd_offs;
                u32 vm_pinst = chan->ramin->pinst;

                if (vm_pinst != ~0)
                        vm_pinst += pgd_offs;

                ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
                                              0, &chan->vm_pd);
                if (ret)
                        return ret;

                nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
        }

        /* RAMHT */
        if (dev_priv->card_type < NV_50) {
                nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
        } else {
                struct nouveau_gpuobj *ramht = NULL;

                ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC, &ramht);
                if (ret)
                        return ret;

                ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
                nouveau_gpuobj_ref(NULL, &ramht);
                if (ret)
                        return ret;
        }

        /* VRAM ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, (1ULL << 40), NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->fb_available_size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        }

        ret = nouveau_ramht_insert(chan, vram_h, vram);
        nouveau_gpuobj_ref(NULL, &vram);
        if (ret) {
                NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        /* TT memory ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, (1ULL << 40), NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VM, &tt);
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->gart_info.aper_size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_GART, &tt);
        }

        if (ret) {
                NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
                return ret;
        }

        ret = nouveau_ramht_insert(chan, tt_h, tt);
        nouveau_gpuobj_ref(NULL, &tt);
        if (ret) {
                NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        nouveau_ramht_ref(NULL, &chan->ramht, chan);

        nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
        nouveau_gpuobj_ref(NULL, &chan->vm_pd);

        if (chan->ramin_heap.free_stack.next)
                drm_mm_takedown(&chan->ramin_heap);
        nouveau_gpuobj_ref(NULL, &chan->ramin);
}

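/*
 * Suspend/resume support: global (non-channel) objects are snapshotted
 * into vmalloc'd shadow copies before the card loses its state, then
 * written back into instance memory on resume.
 */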
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
                        continue;

                gpuobj->suspend = vmalloc(gpuobj->size);
                if (!gpuobj->suspend) {
                        nouveau_gpuobj_resume(dev);
                        return -ENOMEM;
                }

                for (i = 0; i < gpuobj->size; i += 4)
                        gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
        }

        return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (!gpuobj->suspend)
                        continue;

                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

                vfree(gpuobj->suspend);
                gpuobj->suspend = NULL;
        }

        dev_priv->engine.instmem.flush(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_nouveau_grobj_alloc *init = data;
        struct nouveau_channel *chan;
        int ret;

        if (init->handle == ~0)
                return -EINVAL;

        chan = nouveau_channel_get(dev, file_priv, init->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        if (nouveau_ramht_find(chan, init->handle)) {
                ret = -EEXIST;
                goto out;
        }

        ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
        if (ret) {
                NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
                         ret, init->channel, init->handle);
        }

out:
        nouveau_channel_put(&chan);
        return ret;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_nouveau_gpuobj_free *objfree = data;
        struct nouveau_channel *chan;
        int ret;

        chan = nouveau_channel_get(dev, file_priv, objfree->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* Synchronize with the user channel */
        nouveau_channel_idle(chan);

        ret = nouveau_ramht_remove(chan, objfree->handle);
        nouveau_channel_put(&chan);
        return ret;
}

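/*
 * Object accessors: reads and writes go through the mapped PRAMIN BAR
 * when a mapping exists (pinst != ~0); otherwise the 64KiB window at
 * 0x700000 is slid over instance memory by programming its base (in
 * 64KiB units) into register 0x001700, under ramin_lock.
 */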
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64  ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;
                u32  val;

                spin_lock(&dev_priv->ramin_lock);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
                spin_unlock(&dev_priv->ramin_lock);
                return val;
        }

        return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64  ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;

                spin_lock(&dev_priv->ramin_lock);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
                spin_unlock(&dev_priv->ramin_lock);
                return;
        }

        nv_wi32(dev, gpuobj->pinst + offset, val);
}