/* linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
  24#include "gf100.h"
  25#include "changf100.h"
  26
  27#include <core/client.h>
  28#include <core/enum.h>
  29#include <core/gpuobj.h>
  30#include <subdev/bar.h>
  31#include <engine/sw.h>
  32
  33#include <nvif/class.h>
  34
/* Enable delivery of the non-stall ("user event") interrupt by setting
 * bit 31 of the PFIFO interrupt enable mask (0x002140). */
static void
gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}
  41
/* Disable delivery of the non-stall ("user event") interrupt by
 * clearing bit 31 of the PFIFO interrupt enable mask (0x002140). */
static void
gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
  48
/* Rebuild the hardware runlist from the software channel list and tell
 * PFIFO to switch to it, waiting for the hardware to process the update.
 *
 * The runlist is double-buffered: we fill the currently-inactive buffer
 * and then flip runlist.active.  Serialised against concurrent commits
 * by the subdev mutex.
 */
void
gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
{
        struct gf100_fifo_chan *chan;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_memory *cur;
        int nr = 0;
        int target;

        mutex_lock(&subdev->mutex);
        cur = fifo->runlist.mem[fifo->runlist.active];
        fifo->runlist.active = !fifo->runlist.active;

        /* One 8-byte entry per channel: channel id, then 0x00000004. */
        nvkm_kmap(cur);
        list_for_each_entry(chan, &fifo->chan, head) {
                nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
                nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
                nr++;
        }
        nvkm_done(cur);

        /* Aperture for the runlist base: 0x3 = host memory, 0x0 = VRAM. */
        target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;

        /* Point PFIFO at the new runlist (page-aligned address >> 12)
         * and submit it with the entry count. */
        nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
                                    (target << 28));
        nvkm_wr32(device, 0x002274, 0x01f00000 | nr);

        /* gf100_fifo_intr_runlist() wakes us; completion is signalled by
         * bit 20 of 0x00227c clearing.  Give up after two seconds. */
        if (wait_event_timeout(fifo->runlist.wait,
                               !(nvkm_rd32(device, 0x00227c) & 0x00100000),
                               msecs_to_jiffies(2000)) == 0)
                nvkm_error(subdev, "runlist update timeout\n");
        mutex_unlock(&subdev->mutex);
}
  83
  84void
  85gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
  86{
  87        mutex_lock(&fifo->base.engine.subdev.mutex);
  88        list_del_init(&chan->head);
  89        mutex_unlock(&fifo->base.engine.subdev.mutex);
  90}
  91
  92void
  93gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
  94{
  95        mutex_lock(&fifo->base.engine.subdev.mutex);
  96        list_add_tail(&chan->head, &fifo->chan);
  97        mutex_unlock(&fifo->base.engine.subdev.mutex);
  98}
  99
 100static inline int
 101gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
 102{
 103        switch (engn) {
 104        case NVKM_ENGINE_GR    : engn = 0; break;
 105        case NVKM_ENGINE_MSVLD : engn = 1; break;
 106        case NVKM_ENGINE_MSPPP : engn = 2; break;
 107        case NVKM_ENGINE_MSPDEC: engn = 3; break;
 108        case NVKM_ENGINE_CE0   : engn = 4; break;
 109        case NVKM_ENGINE_CE1   : engn = 5; break;
 110        default:
 111                return -1;
 112        }
 113
 114        return engn;
 115}
 116
 117static inline struct nvkm_engine *
 118gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
 119{
 120        struct nvkm_device *device = fifo->base.engine.subdev.device;
 121
 122        switch (engn) {
 123        case 0: engn = NVKM_ENGINE_GR; break;
 124        case 1: engn = NVKM_ENGINE_MSVLD; break;
 125        case 2: engn = NVKM_ENGINE_MSPPP; break;
 126        case 3: engn = NVKM_ENGINE_MSPDEC; break;
 127        case 4: engn = NVKM_ENGINE_CE0; break;
 128        case 5: engn = NVKM_ENGINE_CE1; break;
 129        default:
 130                return NULL;
 131        }
 132
 133        return nvkm_device_engine(device, engn);
 134}
 135
 136static void
 137gf100_fifo_recover_work(struct work_struct *w)
 138{
 139        struct gf100_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
 140        struct nvkm_device *device = fifo->base.engine.subdev.device;
 141        struct nvkm_engine *engine;
 142        unsigned long flags;
 143        u32 engn, engm = 0;
 144        u64 mask, todo;
 145
 146        spin_lock_irqsave(&fifo->base.lock, flags);
 147        mask = fifo->recover.mask;
 148        fifo->recover.mask = 0ULL;
 149        spin_unlock_irqrestore(&fifo->base.lock, flags);
 150
 151        for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn))
 152                engm |= 1 << gf100_fifo_engidx(fifo, engn);
 153        nvkm_mask(device, 0x002630, engm, engm);
 154
 155        for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) {
 156                if ((engine = nvkm_device_engine(device, engn))) {
 157                        nvkm_subdev_fini(&engine->subdev, false);
 158                        WARN_ON(nvkm_subdev_init(&engine->subdev));
 159                }
 160        }
 161
 162        gf100_fifo_runlist_commit(fifo);
 163        nvkm_wr32(device, 0x00262c, engm);
 164        nvkm_mask(device, 0x002630, engm, 0x00000000);
 165}
 166
/* Begin recovery of a faulting engine/channel pair.
 *
 * Must be called with fifo->base.lock held (asserted below).  Disables
 * the channel, removes it from the software runlist, marks it killed,
 * and queues the heavyweight engine reset to the recover workqueue.
 */
static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
                   struct gf100_fifo_chan *chan)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 chid = chan->base.chid;

        nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
                   nvkm_subdev_name[engine->subdev.index], chid);
        assert_spin_locked(&fifo->base.lock);

        /* Clear the channel's enable bit so it's no longer scheduled. */
        nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
        list_del_init(&chan->head);
        chan->killed = true;

        /* Accumulate the engine for gf100_fifo_recover_work(). */
        fifo->recover.mask |= 1ULL << engine->subdev.index;
        schedule_work(&fifo->recover.work);
}
 186
/* Known SCHED_ERROR codes (low byte of register 0x00254c). */
static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
        { 0x0a, "CTXSW_TIMEOUT" },
        {}
};
 192
/* Handle SCHED_ERROR code 0x0a (CTXSW_TIMEOUT): scan the six per-engine
 * status registers for an engine that appears stuck in a context switch
 * and trigger recovery on the channel it is bound to.
 */
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        struct gf100_fifo_chan *chan;
        unsigned long flags;
        u32 engn;

        spin_lock_irqsave(&fifo->base.lock, flags);
        for (engn = 0; engn < 6; engn++) {
                /* Engine status decode; the individual bit meanings are
                 * only partially reverse-engineered (hence unk0/unk1). */
                u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
                u32 busy = (stat & 0x80000000);
                u32 save = (stat & 0x00100000); /* maybe? */
                u32 unk0 = (stat & 0x00040000);
                u32 unk1 = (stat & 0x00001000);
                u32 chid = (stat & 0x0000007f);
                (void)save; /* decoded for documentation only */

                if (busy && unk0 && unk1) {
                        /* Find the software channel with this chid and
                         * recover it; unknown engine slots are skipped. */
                        list_for_each_entry(chan, &fifo->chan, head) {
                                if (chan->base.chid == chid) {
                                        engine = gf100_fifo_engine(fifo, engn);
                                        if (!engine)
                                                break;
                                        gf100_fifo_recover(fifo, engine, chan);
                                        break;
                                }
                        }
                }
        }
        spin_unlock_irqrestore(&fifo->base.lock, flags);
}
 226
 227static void
 228gf100_fifo_intr_sched(struct gf100_fifo *fifo)
 229{
 230        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 231        struct nvkm_device *device = subdev->device;
 232        u32 intr = nvkm_rd32(device, 0x00254c);
 233        u32 code = intr & 0x000000ff;
 234        const struct nvkm_enum *en;
 235
 236        en = nvkm_enum_find(gf100_fifo_sched_reason, code);
 237
 238        nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
 239
 240        switch (code) {
 241        case 0x0a:
 242                gf100_fifo_intr_sched_ctxsw(fifo);
 243                break;
 244        default:
 245                break;
 246        }
 247}
 248
/* MMU fault unit IDs -> name, plus (data2) the subdev/engine to poke or
 * recover.  Entries without data2 are report-only. */
static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
        { 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
        { 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
        { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
        { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
        { 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
        { 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
        { 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
        { 0x13, "PCOUNTER" },
        { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
        { 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
        { 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
        { 0x17, "PMU" },
        {}
};
 265
/* MMU fault reason codes (low nibble of the fault status register). */
static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
        { 0x00, "PT_NOT_PRESENT" },
        { 0x01, "PT_TOO_SHORT" },
        { 0x02, "PAGE_NOT_PRESENT" },
        { 0x03, "VM_LIMIT_EXCEEDED" },
        { 0x04, "NO_CHANNEL" },
        { 0x05, "PAGE_SYSTEM_ONLY" },
        { 0x06, "PAGE_READ_ONLY" },
        { 0x0a, "COMPRESSED_SYSRAM" },
        { 0x0c, "INVALID_STORAGE_TYPE" },
        {}
};
 279
/* HUB client IDs for MMU faults (used when the HUB bit is set). */
static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
        { 0x01, "PCOPY0" },
        { 0x02, "PCOPY1" },
        { 0x04, "DISPATCH" },
        { 0x05, "CTXCTL" },
        { 0x06, "PFIFO" },
        { 0x07, "BAR_READ" },
        { 0x08, "BAR_WRITE" },
        { 0x0b, "PVP" },
        { 0x0c, "PMSPPP" },
        { 0x0d, "PMSVLD" },
        { 0x11, "PCOUNTER" },
        { 0x12, "PMU" },
        { 0x14, "CCACHE" },
        { 0x15, "CCACHE_POST" },
        {}
};
 298
/* GPC client IDs for MMU faults (used when the HUB bit is clear). */
static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
        { 0x01, "TEX" },
        { 0x0c, "ESETUP" },
        { 0x0e, "CTXCTL" },
        { 0x0f, "PROP" },
        {}
};
 307
/* Decode and report an MMU fault from fault unit 'unit', and — when the
 * faulting unit maps to a recoverable engine and the instance address
 * resolves to a known channel — kick off channel recovery.
 */
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        /* Per-unit fault info block: instance, vaddr lo/hi, status. */
        u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
        u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
        u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
        u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
        u32 gpc    = (stat & 0x1f000000) >> 24;
        u32 client = (stat & 0x00001f00) >> 8;
        u32 write  = (stat & 0x00000080);
        u32 hub    = (stat & 0x00000040);
        u32 reason = (stat & 0x0000000f);
        const struct nvkm_enum *er, *eu, *ec;
        struct nvkm_engine *engine = NULL;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char gpcid[8] = "";

        er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
        eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
        /* Client ID namespace differs between HUB and GPC faults. */
        if (hub) {
                ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
        } else {
                ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
                snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
        }

        if (eu && eu->data2) {
                switch (eu->data2) {
                /* BAR/IFB faults: poke the relevant bind register to
                 * retrigger/ack the unit rather than recover an engine. */
                case NVKM_SUBDEV_BAR:
                        nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
                        break;
                case NVKM_SUBDEV_INSTMEM:
                        nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
                        break;
                case NVKM_ENGINE_IFB:
                        nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
                        break;
                default:
                        engine = nvkm_device_engine(device, eu->data2);
                        break;
                }
        }

        /* Resolve the faulting instance block to a channel (ref'd). */
        chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

        nvkm_error(subdev,
                   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
                   "reason %02x [%s] on channel %d [%010llx %s]\n",
                   write ? "write" : "read", (u64)vahi << 32 | valo,
                   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
                   reason, er ? er->name : "", chan ? chan->chid : -1,
                   (u64)inst << 12,
                   chan ? chan->object.client->name : "unknown");

        if (engine && chan)
                gf100_fifo_recover(fifo, engine, (void *)chan);
        nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
 369
/* Known PBDMA interrupt status bits (register 0x040108 + unit*0x2000). */
static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*      { 0x00008000, "" }      seen with null ib push */
        { 0x00200000, "ILLEGAL_MTHD" },
        { 0x00800000, "EMPTY_SUBC" },
        {}
};
 377
/* Handle a PBDMA unit interrupt: give software-method handling (SW
 * class) first crack at EMPTY_SUBC, log anything left over, then ack
 * the method and the interrupt status.
 */
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
        u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
        u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        u32 show= stat;
        char msg[128];

        /* EMPTY_SUBC: the method may be a software method; if the SW
         * engine handles it, suppress the report. */
        if (stat & 0x00800000) {
                if (device->sw) {
                        if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
                                show &= ~0x00800000;
                }
        }

        if (show) {
                nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
                chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
                                   "subc %d mthd %04x data %08x\n",
                           unit, show, msg, chid, chan ? chan->inst->addr : 0,
                           chan ? chan->object.client->name : "unknown",
                           subc, mthd, data);
                nvkm_fifo_chan_put(&fifo->base, flags, &chan);
        }

        /* Ack: drop the offending method, then clear the status bits. */
        nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
        nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
 415
/* Handle runlist interrupts: bit 28 signals runlist-update completion
 * (wakes gf100_fifo_runlist_commit()); anything else is unexpected and
 * just logged and acked.
 */
static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x002a00);

        if (intr & 0x10000000) {
                wake_up(&fifo->runlist.wait);
                nvkm_wr32(device, 0x002a00, 0x10000000);
                intr &= ~0x10000000;
        }

        if (intr) {
                nvkm_error(subdev, "RUNLIST %08x\n", intr);
                nvkm_wr32(device, 0x002a00, intr);
        }
}
 434
 435static void
 436gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
 437{
 438        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 439        struct nvkm_device *device = subdev->device;
 440        u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
 441        u32 inte = nvkm_rd32(device, 0x002628);
 442        u32 unkn;
 443
 444        nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);
 445
 446        for (unkn = 0; unkn < 8; unkn++) {
 447                u32 ints = (intr >> (unkn * 0x04)) & inte;
 448                if (ints & 0x1) {
 449                        nvkm_fifo_uevent(&fifo->base);
 450                        ints &= ~1;
 451                }
 452                if (ints) {
 453                        nvkm_error(subdev, "ENGINE %d %d %01x",
 454                                   engn, unkn, ints);
 455                        nvkm_mask(device, 0x002628, ints, 0);
 456                }
 457        }
 458}
 459
 460void
 461gf100_fifo_intr_engine(struct gf100_fifo *fifo)
 462{
 463        struct nvkm_device *device = fifo->base.engine.subdev.device;
 464        u32 mask = nvkm_rd32(device, 0x0025a4);
 465        while (mask) {
 466                u32 unit = __ffs(mask);
 467                gf100_fifo_intr_engine_unit(fifo, unit);
 468                mask &= ~(1 << unit);
 469        }
 470}
 471
/* Top-level PFIFO interrupt handler: read pending status (masked by the
 * enable register), dispatch each known bit to its handler, ack it, and
 * finally disable+ack anything unrecognised so it can't interrupt-storm.
 */
static void
gf100_fifo_intr(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x002140);
        u32 stat = nvkm_rd32(device, 0x002100) & mask;

        /* Bit 0: unknown; report auxiliary status and ack. */
        if (stat & 0x00000001) {
                u32 intr = nvkm_rd32(device, 0x00252c);
                nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x00000001);
                stat &= ~0x00000001;
        }

        /* Bit 8: SCHED_ERROR (may trigger channel recovery). */
        if (stat & 0x00000100) {
                gf100_fifo_intr_sched(fifo);
                nvkm_wr32(device, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        /* Bit 16: unknown; report auxiliary status and ack. */
        if (stat & 0x00010000) {
                u32 intr = nvkm_rd32(device, 0x00256c);
                nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x00010000);
                stat &= ~0x00010000;
        }

        /* Bit 24: unknown; report auxiliary status and ack. */
        if (stat & 0x01000000) {
                u32 intr = nvkm_rd32(device, 0x00258c);
                nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x01000000);
                stat &= ~0x01000000;
        }

        /* Bit 28: MMU fault — one sub-status bit per fault unit. */
        if (stat & 0x10000000) {
                u32 mask = nvkm_rd32(device, 0x00259c);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gf100_fifo_intr_fault(fifo, unit);
                        nvkm_wr32(device, 0x00259c, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x10000000;
        }

        /* Bit 29: PBDMA — one sub-status bit per PBDMA unit. */
        if (stat & 0x20000000) {
                u32 mask = nvkm_rd32(device, 0x0025a0);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gf100_fifo_intr_pbdma(fifo, unit);
                        nvkm_wr32(device, 0x0025a0, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x20000000;
        }

        /* Bit 30: runlist event (acked inside the handler). */
        if (stat & 0x40000000) {
                gf100_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }

        /* Bit 31: engine (non-stall) interrupts. */
        if (stat & 0x80000000) {
                gf100_fifo_intr_engine(fifo);
                stat &= ~0x80000000;
        }

        /* Anything left is unhandled: mask it off and ack it. */
        if (stat) {
                nvkm_error(subdev, "INTR %08x\n", stat);
                nvkm_mask(device, 0x002140, stat, 0x00000000);
                nvkm_wr32(device, 0x002100, stat);
        }
}
 546
/* One-time setup: discover the PBDMA count, allocate both runlist
 * buffers, and allocate+map the USERD memory through BAR1.
 * Returns 0 on success or a negative errno.
 */
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int ret;

        /* Determine number of PBDMAs by checking valid enable bits. */
        nvkm_wr32(device, 0x002204, 0xffffffff);
        fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204));
        nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

        /* Two 4KiB runlist buffers (double-buffered, see commit()). */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
                              false, &fifo->runlist.mem[0]);
        if (ret)
                return ret;

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
                              false, &fifo->runlist.mem[1]);
        if (ret)
                return ret;

        init_waitqueue_head(&fifo->runlist.wait);

        /* USERD backing store: one page per channel, 128 channels. */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
                              0x1000, false, &fifo->user.mem);
        if (ret)
                return ret;

        ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
        if (ret)
                return ret;

        nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
        return 0;
}
 585
 586static void
 587gf100_fifo_fini(struct nvkm_fifo *base)
 588{
 589        struct gf100_fifo *fifo = gf100_fifo(base);
 590        flush_work(&fifo->recover.work);
 591}
 592
/* Hardware bring-up: enable the PBDMAs, route engines to them, reset
 * per-PBDMA interrupt state, point hardware at USERD, and unmask
 * interrupts.
 */
static void
gf100_fifo_init(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int i;

        /* Enable PBDMAs. */
        nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
        nvkm_wr32(device, 0x002204, (1 << fifo->pbdma_nr) - 1);

        /* Assign engines to PBDMAs. */
        if (fifo->pbdma_nr >= 3) {
                nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
                nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
                nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
                nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
                nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
                nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
        }

        /* PBDMA[n]: clear stale state, ack and enable interrupts. */
        for (i = 0; i < fifo->pbdma_nr; i++) {
                nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
        }

        nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
        /* Tell hardware where USERD lives (BAR offset >> 12). */
        nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

        /* Ack everything pending, then enable all but bit 31 (uevent,
         * enabled on demand by gf100_fifo_uevent_init()). */
        nvkm_wr32(device, 0x002100, 0xffffffff);
        nvkm_wr32(device, 0x002140, 0x7fffffff);
        nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}
 628
/* Destructor: release the BAR mapping before freeing the USERD memory
 * it maps, then free both runlist buffers.  Returns the object for the
 * caller to kfree().
 */
static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        nvkm_vm_put(&fifo->user.bar);
        nvkm_memory_del(&fifo->user.mem);
        nvkm_memory_del(&fifo->runlist.mem[0]);
        nvkm_memory_del(&fifo->runlist.mem[1]);
        return fifo;
}
 639
/* GF100 FIFO implementation hooks and supported channel classes. */
static const struct nvkm_fifo_func
gf100_fifo = {
        .dtor = gf100_fifo_dtor,
        .oneinit = gf100_fifo_oneinit,
        .init = gf100_fifo_init,
        .fini = gf100_fifo_fini,
        .intr = gf100_fifo_intr,
        .uevent_init = gf100_fifo_uevent_init,
        .uevent_fini = gf100_fifo_uevent_fini,
        .chan = {
                &gf100_fifo_gpfifo_oclass,
                NULL
        },
};
 654
 655int
 656gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 657{
 658        struct gf100_fifo *fifo;
 659
 660        if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
 661                return -ENOMEM;
 662        INIT_LIST_HEAD(&fifo->chan);
 663        INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
 664        *pfifo = &fifo->base;
 665
 666        return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
 667}
 668