linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "changf100.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>

static void
gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

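/*
 * Rebuild and submit the channel runlist.  The runlist is double-buffered:
 * the currently inactive buffer is filled from the software channel list,
 * then handed to the hardware through 0x002270/0x002274.  Completion is
 * signalled via the RUNLIST interrupt (gf100_fifo_intr_runlist()), which
 * wakes runlist.wait; the bit polled in 0x00227c appears to indicate a
 * pending update, though the register is only partially understood.
 */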
void
gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
{
        struct gf100_fifo_chan *chan;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_memory *cur;
        int nr = 0;
        int target;

        mutex_lock(&subdev->mutex);
        cur = fifo->runlist.mem[fifo->runlist.active];
        fifo->runlist.active = !fifo->runlist.active;

        nvkm_kmap(cur);
        list_for_each_entry(chan, &fifo->chan, head) {
                nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
                nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
                nr++;
        }
        nvkm_done(cur);

        switch (nvkm_memory_target(cur)) {
        case NVKM_MEM_TARGET_VRAM: target = 0; break;
        case NVKM_MEM_TARGET_NCOH: target = 3; break;
        default:
                mutex_unlock(&subdev->mutex);
                WARN_ON(1);
                return;
        }

        nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
                                    (target << 28));
        nvkm_wr32(device, 0x002274, 0x01f00000 | nr);

        if (wait_event_timeout(fifo->runlist.wait,
                               !(nvkm_rd32(device, 0x00227c) & 0x00100000),
                               msecs_to_jiffies(2000)) == 0)
                nvkm_error(subdev, "runlist update timeout\n");
        mutex_unlock(&subdev->mutex);
}

void
gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
        mutex_lock(&fifo->base.engine.subdev.mutex);
        list_del_init(&chan->head);
        mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
        mutex_lock(&fifo->base.engine.subdev.mutex);
        list_add_tail(&chan->head, &fifo->chan);
        mutex_unlock(&fifo->base.engine.subdev.mutex);
}

static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
        switch (engn) {
        case NVKM_ENGINE_GR    : engn = 0; break;
        case NVKM_ENGINE_MSVLD : engn = 1; break;
        case NVKM_ENGINE_MSPPP : engn = 2; break;
        case NVKM_ENGINE_MSPDEC: engn = 3; break;
        case NVKM_ENGINE_CE0   : engn = 4; break;
        case NVKM_ENGINE_CE1   : engn = 5; break;
        default:
                return -1;
        }

        return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;

        switch (engn) {
        case 0: engn = NVKM_ENGINE_GR; break;
        case 1: engn = NVKM_ENGINE_MSVLD; break;
        case 2: engn = NVKM_ENGINE_MSPPP; break;
        case 3: engn = NVKM_ENGINE_MSPDEC; break;
        case 4: engn = NVKM_ENGINE_CE0; break;
        case 5: engn = NVKM_ENGINE_CE1; break;
        default:
                return NULL;
        }

        return nvkm_device_engine(device, engn);
}

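/*
 * Deferred half of engine recovery, run from a workqueue.  Engines latched
 * into recover.mask by gf100_fifo_recover() are blocked from runlist
 * scheduling via 0x002630, reset by cycling them through subdev fini/init,
 * then re-enabled once a fresh runlist has been committed.
 */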
static void
gf100_fifo_recover_work(struct work_struct *w)
{
        struct gf100_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        unsigned long flags;
        u32 engn, engm = 0;
        u64 mask, todo;

        spin_lock_irqsave(&fifo->base.lock, flags);
        mask = fifo->recover.mask;
        fifo->recover.mask = 0ULL;
        spin_unlock_irqrestore(&fifo->base.lock, flags);

        for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn))
                engm |= 1 << gf100_fifo_engidx(fifo, engn);
        nvkm_mask(device, 0x002630, engm, engm);

        for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) {
                if ((engine = nvkm_device_engine(device, engn))) {
                        nvkm_subdev_fini(&engine->subdev, false);
                        WARN_ON(nvkm_subdev_init(&engine->subdev));
                }
        }

        gf100_fifo_runlist_commit(fifo);
        nvkm_wr32(device, 0x00262c, engm);
        nvkm_mask(device, 0x002630, engm, 0x00000000);
}

static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
                   struct gf100_fifo_chan *chan)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 chid = chan->base.chid;

        nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
                   nvkm_subdev_name[engine->subdev.index], chid);
        assert_spin_locked(&fifo->base.lock);

        nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
        list_del_init(&chan->head);
        chan->killed = true;

        if (engine != &fifo->base.engine)
                fifo->recover.mask |= 1ULL << engine->subdev.index;
        schedule_work(&fifo->recover.work);
        nvkm_fifo_kevent(&fifo->base, chid);
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
        { 0x0a, "CTXSW_TIMEOUT" },
        {}
};

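/*
 * CTXSW_TIMEOUT recovery: scan the per-engine status registers at 0x002640
 * for an engine that appears wedged mid context-switch (the unk0/unk1 bits
 * tested below are not fully understood), and recover the channel it was
 * servicing.
 */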
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        struct gf100_fifo_chan *chan;
        unsigned long flags;
        u32 engn;

        spin_lock_irqsave(&fifo->base.lock, flags);
        for (engn = 0; engn < 6; engn++) {
                u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
                u32 busy = (stat & 0x80000000);
                u32 save = (stat & 0x00100000); /* maybe? */
                u32 unk0 = (stat & 0x00040000);
                u32 unk1 = (stat & 0x00001000);
                u32 chid = (stat & 0x0000007f);
                (void)save;

                if (busy && unk0 && unk1) {
                        list_for_each_entry(chan, &fifo->chan, head) {
                                if (chan->base.chid == chid) {
                                        engine = gf100_fifo_engine(fifo, engn);
                                        if (!engine)
                                                break;
                                        gf100_fifo_recover(fifo, engine, chan);
                                        break;
                                }
                        }
                }
        }
        spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00254c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en;

        en = nvkm_enum_find(gf100_fifo_sched_reason, code);

        nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

        switch (code) {
        case 0x0a:
                gf100_fifo_intr_sched_ctxsw(fifo);
                break;
        default:
                break;
        }
}

static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
        { 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
        { 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
        { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
        { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
        { 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
        { 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
        { 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
        { 0x13, "PCOUNTER" },
        { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
        { 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
        { 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
        { 0x17, "PMU" },
        {}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
        { 0x00, "PT_NOT_PRESENT" },
        { 0x01, "PT_TOO_SHORT" },
        { 0x02, "PAGE_NOT_PRESENT" },
        { 0x03, "VM_LIMIT_EXCEEDED" },
        { 0x04, "NO_CHANNEL" },
        { 0x05, "PAGE_SYSTEM_ONLY" },
        { 0x06, "PAGE_READ_ONLY" },
        { 0x0a, "COMPRESSED_SYSRAM" },
        { 0x0c, "INVALID_STORAGE_TYPE" },
        {}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
        { 0x01, "PCOPY0" },
        { 0x02, "PCOPY1" },
        { 0x04, "DISPATCH" },
        { 0x05, "CTXCTL" },
        { 0x06, "PFIFO" },
        { 0x07, "BAR_READ" },
        { 0x08, "BAR_WRITE" },
        { 0x0b, "PVP" },
        { 0x0c, "PMSPPP" },
        { 0x0d, "PMSVLD" },
        { 0x11, "PCOUNTER" },
        { 0x12, "PMU" },
        { 0x14, "CCACHE" },
        { 0x15, "CCACHE_POST" },
        {}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
        { 0x01, "TEX" },
        { 0x0c, "ESETUP" },
        { 0x0e, "CTXCTL" },
        { 0x0f, "PROP" },
        {}
};

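/*
 * Decode and report an MMU fault from the per-unit fault registers at
 * 0x002800, using the enum tables above to name the engine, client and
 * reason.  For the BAR/PEEPHOLE units the zero-mask nvkm_mask() calls
 * below just write the 0x0017xx registers back to themselves, which
 * appears to be enough to unstick those units after a fault.
 */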
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
        u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
        u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
        u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
        u32 gpc    = (stat & 0x1f000000) >> 24;
        u32 client = (stat & 0x00001f00) >> 8;
        u32 write  = (stat & 0x00000080);
        u32 hub    = (stat & 0x00000040);
        u32 reason = (stat & 0x0000000f);
        const struct nvkm_enum *er, *eu, *ec;
        struct nvkm_engine *engine = NULL;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char gpcid[8] = "";

        er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
        eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
        if (hub) {
                ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
        } else {
                ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
                snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
        }

        if (eu && eu->data2) {
                switch (eu->data2) {
                case NVKM_SUBDEV_BAR:
                        nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
                        break;
                case NVKM_SUBDEV_INSTMEM:
                        nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
                        break;
                case NVKM_ENGINE_IFB:
                        nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
                        break;
                default:
                        engine = nvkm_device_engine(device, eu->data2);
                        break;
                }
        }

        chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

        nvkm_error(subdev,
                   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
                   "reason %02x [%s] on channel %d [%010llx %s]\n",
                   write ? "write" : "read", (u64)vahi << 32 | valo,
                   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
                   reason, er ? er->name : "", chan ? chan->chid : -1,
                   (u64)inst << 12,
                   chan ? chan->object.client->name : "unknown");

        if (engine && chan)
                gf100_fifo_recover(fifo, engine, (void *)chan);
        nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*      { 0x00008000, "" }      seen with null ib push */
        { 0x00200000, "ILLEGAL_MTHD" },
        { 0x00800000, "EMPTY_SUBC" },
        {}
};

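/*
 * PBDMA interrupt handler; each unit's registers sit at a 0x2000 stride.
 * EMPTY_SUBC (0x00800000) is offered to the SW engine first, since that is
 * how software methods surface (a method fired at a subchannel with no
 * hardware engine bound); anything still set afterwards is logged, then
 * the interrupt is cleared.
 */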
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
        u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
        u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        u32 show = stat;
        char msg[128];

        if (stat & 0x00800000) {
                if (device->sw) {
                        if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
                                show &= ~0x00800000;
                }
        }

        if (show) {
                nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
                chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
                                   "subc %d mthd %04x data %08x\n",
                           unit, show, msg, chid, chan ? chan->inst->addr : 0,
                           chan ? chan->object.client->name : "unknown",
                           subc, mthd, data);
                nvkm_fifo_chan_put(&fifo->base, flags, &chan);
        }

        nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
        nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x002a00);

        if (intr & 0x10000000) {
                wake_up(&fifo->runlist.wait);
                nvkm_wr32(device, 0x002a00, 0x10000000);
                intr &= ~0x10000000;
        }

        if (intr) {
                nvkm_error(subdev, "RUNLIST %08x\n", intr);
                nvkm_wr32(device, 0x002a00, intr);
        }
}

 443
 444static void
 445gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
 446{
 447        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 448        struct nvkm_device *device = subdev->device;
 449        u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
 450        u32 inte = nvkm_rd32(device, 0x002628);
 451        u32 unkn;
 452
 453        nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);
 454
 455        for (unkn = 0; unkn < 8; unkn++) {
 456                u32 ints = (intr >> (unkn * 0x04)) & inte;
 457                if (ints & 0x1) {
 458                        nvkm_fifo_uevent(&fifo->base);
 459                        ints &= ~1;
 460                }
 461                if (ints) {
 462                        nvkm_error(subdev, "ENGINE %d %d %01x",
 463                                   engn, unkn, ints);
 464                        nvkm_mask(device, 0x002628, ints, 0);
 465                }
 466        }
 467}

void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 mask = nvkm_rd32(device, 0x0025a4);
        while (mask) {
                u32 unit = __ffs(mask);
                gf100_fifo_intr_engine_unit(fifo, unit);
                mask &= ~(1 << unit);
        }
}

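/*
 * Top-level PFIFO interrupt handler.  0x002100 holds the raw status,
 * qualified by the enable mask in 0x002140; each recognised bit is
 * dispatched and acked below, and anything left over is logged and masked
 * off so an unknown source cannot storm the CPU.
 */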
static void
gf100_fifo_intr(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x002140);
        u32 stat = nvkm_rd32(device, 0x002100) & mask;

        if (stat & 0x00000001) {
                u32 intr = nvkm_rd32(device, 0x00252c);
                nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x00000001);
                stat &= ~0x00000001;
        }

        if (stat & 0x00000100) {
                gf100_fifo_intr_sched(fifo);
                nvkm_wr32(device, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x00010000) {
                u32 intr = nvkm_rd32(device, 0x00256c);
                nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x00010000);
                stat &= ~0x00010000;
        }

        if (stat & 0x01000000) {
                u32 intr = nvkm_rd32(device, 0x00258c);
                nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x01000000);
                stat &= ~0x01000000;
        }

        if (stat & 0x10000000) {
                u32 mask = nvkm_rd32(device, 0x00259c);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gf100_fifo_intr_fault(fifo, unit);
                        nvkm_wr32(device, 0x00259c, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 mask = nvkm_rd32(device, 0x0025a0);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gf100_fifo_intr_pbdma(fifo, unit);
                        nvkm_wr32(device, 0x0025a0, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                gf100_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }

        if (stat & 0x80000000) {
                gf100_fifo_intr_engine(fifo);
                stat &= ~0x80000000;
        }

        if (stat) {
                nvkm_error(subdev, "INTR %08x\n", stat);
                nvkm_mask(device, 0x002140, stat, 0x00000000);
                nvkm_wr32(device, 0x002100, stat);
        }
}

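/*
 * One-time setup: determine fifo->pbdma_nr from the hardware's enable
 * mask, allocate both runlist buffers, and allocate and BAR-map the
 * per-channel user area (one 0x1000-byte page for each of the 128
 * channels this FIFO exposes).
 */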
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int ret;

        /* Determine number of PBDMAs by checking valid enable bits. */
        nvkm_wr32(device, 0x002204, 0xffffffff);
        fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204));
        nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
                              false, &fifo->runlist.mem[0]);
        if (ret)
                return ret;

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
                              false, &fifo->runlist.mem[1]);
        if (ret)
                return ret;

        init_waitqueue_head(&fifo->runlist.wait);

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
                              0x1000, false, &fifo->user.mem);
        if (ret)
                return ret;

        ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
        if (ret)
                return ret;

        nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
        return 0;
}

static void
gf100_fifo_fini(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        flush_work(&fifo->recover.work);
}

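/*
 * Runtime initialisation: enable the PBDMA units (0x000204/0x002204),
 * program the fixed engine-to-PBDMA routing (only done when three or more
 * units are present), clear and unmask the per-unit interrupts, and point
 * the hardware at the user area via 0x002254.
 */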
static void
gf100_fifo_init(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int i;

        /* Enable PBDMAs. */
        nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
        nvkm_wr32(device, 0x002204, (1 << fifo->pbdma_nr) - 1);

        /* Assign engines to PBDMAs. */
        if (fifo->pbdma_nr >= 3) {
                nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
                nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
                nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPPP */
                nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
                nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
                nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
        }

        /* PBDMA[n] */
        for (i = 0; i < fifo->pbdma_nr; i++) {
                nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
        }

        nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
        nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

        nvkm_wr32(device, 0x002100, 0xffffffff);
        nvkm_wr32(device, 0x002140, 0x7fffffff);
        nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}

static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        nvkm_vm_put(&fifo->user.bar);
        nvkm_memory_del(&fifo->user.mem);
        nvkm_memory_del(&fifo->runlist.mem[0]);
        nvkm_memory_del(&fifo->runlist.mem[1]);
        return fifo;
}

static const struct nvkm_fifo_func
gf100_fifo = {
        .dtor = gf100_fifo_dtor,
        .oneinit = gf100_fifo_oneinit,
        .init = gf100_fifo_init,
        .fini = gf100_fifo_fini,
        .intr = gf100_fifo_intr,
        .uevent_init = gf100_fifo_uevent_init,
        .uevent_fini = gf100_fifo_uevent_fini,
        .chan = {
                &gf100_fifo_gpfifo_oclass,
                NULL
        },
};

int
gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
        struct gf100_fifo *fifo;

        if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
                return -ENOMEM;
        INIT_LIST_HEAD(&fifo->chan);
        INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
        *pfifo = &fifo->base;

        return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
}