linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "changf100.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <engine/sw.h>

#include <nvif/class.h>

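/*
 * Non-stall ("uevent") interrupt enable: bit 31 of the PFIFO interrupt
 * enable mask at 0x002140 appears to gate the engine non-stall interrupts
 * that wake channel event waiters (see gf100_fifo_intr_engine below).
 */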
static void
gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

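/*
 * Runlists are double-buffered: the inactive buffer is filled with one
 * 8-byte entry per channel, the buffers are flipped, and the new list is
 * submitted via 0x002270 (base >> 12 plus memory target) and 0x002274
 * (entry count).  The runlist interrupt wakes runlist.wait; bit 20 of
 * 0x00227c seems to indicate an update is still pending.
 */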
void
gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
{
        struct gf100_fifo_chan *chan;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_memory *cur;
        int nr = 0;
        int target;

        mutex_lock(&fifo->base.mutex);
        cur = fifo->runlist.mem[fifo->runlist.active];
        fifo->runlist.active = !fifo->runlist.active;

        nvkm_kmap(cur);
        list_for_each_entry(chan, &fifo->chan, head) {
                nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
                nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
                nr++;
        }
        nvkm_done(cur);

        switch (nvkm_memory_target(cur)) {
        case NVKM_MEM_TARGET_VRAM: target = 0; break;
        case NVKM_MEM_TARGET_NCOH: target = 3; break;
        default:
                mutex_unlock(&fifo->base.mutex);
                WARN_ON(1);
                return;
        }

        nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
                                    (target << 28));
        nvkm_wr32(device, 0x002274, 0x01f00000 | nr);

        if (wait_event_timeout(fifo->runlist.wait,
                               !(nvkm_rd32(device, 0x00227c) & 0x00100000),
                               msecs_to_jiffies(2000)) == 0)
                nvkm_error(subdev, "runlist update timeout\n");
        mutex_unlock(&fifo->base.mutex);
}

void
gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
        mutex_lock(&fifo->base.mutex);
        list_del_init(&chan->head);
        mutex_unlock(&fifo->base.mutex);
}

void
gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
        mutex_lock(&fifo->base.mutex);
        list_add_tail(&chan->head, &fifo->chan);
        mutex_unlock(&fifo->base.mutex);
}

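/*
 * Translate the fixed GF100 FIFO engine indices (GF100_FIFO_ENGN_*) into
 * nvkm engine type/instance pairs, and back again.
 */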
static struct nvkm_engine *
gf100_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
{
        enum nvkm_subdev_type type;
        int inst;

        switch (engi) {
        case GF100_FIFO_ENGN_GR    : type = NVKM_ENGINE_GR    ; inst = 0; break;
        case GF100_FIFO_ENGN_MSPDEC: type = NVKM_ENGINE_MSPDEC; inst = 0; break;
        case GF100_FIFO_ENGN_MSPPP : type = NVKM_ENGINE_MSPPP ; inst = 0; break;
        case GF100_FIFO_ENGN_MSVLD : type = NVKM_ENGINE_MSVLD ; inst = 0; break;
        case GF100_FIFO_ENGN_CE0   : type = NVKM_ENGINE_CE    ; inst = 0; break;
        case GF100_FIFO_ENGN_CE1   : type = NVKM_ENGINE_CE    ; inst = 1; break;
        case GF100_FIFO_ENGN_SW    : type = NVKM_ENGINE_SW    ; inst = 0; break;
        default:
                WARN_ON(1);
                return NULL;
        }

        return nvkm_device_engine(fifo->engine.subdev.device, type, inst);
}

static int
gf100_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
        switch (engine->subdev.type) {
        case NVKM_ENGINE_GR    : return GF100_FIFO_ENGN_GR;
        case NVKM_ENGINE_MSPDEC: return GF100_FIFO_ENGN_MSPDEC;
        case NVKM_ENGINE_MSPPP : return GF100_FIFO_ENGN_MSPPP;
        case NVKM_ENGINE_MSVLD : return GF100_FIFO_ENGN_MSVLD;
        case NVKM_ENGINE_CE    : return GF100_FIFO_ENGN_CE0 + engine->subdev.inst;
        case NVKM_ENGINE_SW    : return GF100_FIFO_ENGN_SW;
        default:
                WARN_ON(1);
                return -1;
        }
}

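/*
 * Deferred recovery: each engine flagged in recover.mask is held off via
 * 0x002630 and re-initialised, and the runlist is rebuilt without the
 * killed channel(s).  The write to 0x00262c presumably triggers the
 * actual per-engine reset before 0x002630 is released again.
 */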
static void
gf100_fifo_recover_work(struct work_struct *w)
{
        struct gf100_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        unsigned long flags;
        u32 engm, engn, todo;

        spin_lock_irqsave(&fifo->base.lock, flags);
        engm = fifo->recover.mask;
        fifo->recover.mask = 0ULL;
        spin_unlock_irqrestore(&fifo->base.lock, flags);

        nvkm_mask(device, 0x002630, engm, engm);

        for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT_ULL(engn)) {
                if ((engine = gf100_fifo_id_engine(&fifo->base, engn))) {
                        nvkm_subdev_fini(&engine->subdev, false);
                        WARN_ON(nvkm_subdev_init(&engine->subdev));
                }
        }

        gf100_fifo_runlist_commit(fifo);
        nvkm_wr32(device, 0x00262c, engm);
        nvkm_mask(device, 0x002630, engm, 0x00000000);
}

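/*
 * Kill a faulting channel and schedule engine recovery.  Called with
 * fifo->base.lock held: the channel is disabled via its 0x003004 control
 * register, dropped from the runlist, and the affected engine (SW
 * excluded) is queued for the recovery worker above.
 */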
static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
                   struct gf100_fifo_chan *chan)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 chid = chan->base.chid;
        int engi = gf100_fifo_engine_id(&fifo->base, engine);

        nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
                   engine->subdev.name, chid);
        assert_spin_locked(&fifo->base.lock);

        nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
        list_del_init(&chan->head);
        chan->killed = true;

        if (engi >= 0 && engi != GF100_FIFO_ENGN_SW)
                fifo->recover.mask |= BIT(engi);
        schedule_work(&fifo->recover.work);
        nvkm_fifo_kevent(&fifo->base, chid);
}

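/*
 * MMU fault decode tables.  The unit and client names come from reverse
 * engineering, so the lists may be incomplete for some chipsets.
 */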
static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
        { 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
        { 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
        { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
        { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
        { 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
        { 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
        { 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
        { 0x13, "PCOUNTER" },
        { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
        { 0x15, "PCE0", NULL, NVKM_ENGINE_CE, 0 },
        { 0x16, "PCE1", NULL, NVKM_ENGINE_CE, 1 },
        { 0x17, "PMU" },
        {}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
        { 0x00, "PT_NOT_PRESENT" },
        { 0x01, "PT_TOO_SHORT" },
        { 0x02, "PAGE_NOT_PRESENT" },
        { 0x03, "VM_LIMIT_EXCEEDED" },
        { 0x04, "NO_CHANNEL" },
        { 0x05, "PAGE_SYSTEM_ONLY" },
        { 0x06, "PAGE_READ_ONLY" },
        { 0x0a, "COMPRESSED_SYSRAM" },
        { 0x0c, "INVALID_STORAGE_TYPE" },
        {}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
        { 0x01, "PCOPY0" },
        { 0x02, "PCOPY1" },
        { 0x04, "DISPATCH" },
        { 0x05, "CTXCTL" },
        { 0x06, "PFIFO" },
        { 0x07, "BAR_READ" },
        { 0x08, "BAR_WRITE" },
        { 0x0b, "PVP" },
        { 0x0c, "PMSPPP" },
        { 0x0d, "PMSVLD" },
        { 0x11, "PCOUNTER" },
        { 0x12, "PMU" },
        { 0x14, "CCACHE" },
        { 0x15, "CCACHE_POST" },
        {}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
        { 0x01, "TEX" },
        { 0x0c, "ESETUP" },
        { 0x0e, "CTXCTL" },
        { 0x0f, "PROP" },
        {}
};

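/*
 * Handle a decoded MMU fault: reset BAR mappings or look up the faulting
 * engine as appropriate, log the fault, and if it can be pinned to a
 * channel on a recoverable engine, kick off channel recovery.
 */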
static void
gf100_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        const struct nvkm_enum *er, *eu, *ec;
        struct nvkm_engine *engine = NULL;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char gpcid[8] = "";

        er = nvkm_enum_find(gf100_fifo_fault_reason, info->reason);
        eu = nvkm_enum_find(gf100_fifo_fault_engine, info->engine);
        if (info->hub) {
                ec = nvkm_enum_find(gf100_fifo_fault_hubclient, info->client);
        } else {
                ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, info->client);
                snprintf(gpcid, sizeof(gpcid), "GPC%d/", info->gpc);
        }

        if (eu && eu->data2) {
                switch (eu->data2) {
                case NVKM_SUBDEV_BAR:
                        nvkm_bar_bar1_reset(device);
                        break;
                case NVKM_SUBDEV_INSTMEM:
                        nvkm_bar_bar2_reset(device);
                        break;
                case NVKM_ENGINE_IFB:
                        nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
                        break;
                default:
                        engine = nvkm_device_engine(device, eu->data2, eu->inst);
                        break;
                }
        }

        chan = nvkm_fifo_chan_inst(&fifo->base, info->inst, &flags);

        nvkm_error(subdev,
                   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
                   "reason %02x [%s] on channel %d [%010llx %s]\n",
                   info->access ? "write" : "read", info->addr,
                   info->engine, eu ? eu->name : "",
                   info->client, gpcid, ec ? ec->name : "",
                   info->reason, er ? er->name : "", chan ? chan->chid : -1,
                   info->inst, chan ? chan->object.client->name : "unknown");

        if (engine && chan)
                gf100_fifo_recover(fifo, engine, (void *)chan);
        nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
        { 0x0a, "CTXSW_TIMEOUT" },
        {}
};

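/*
 * SCHED_ERROR 0x0a (CTXSW_TIMEOUT): scan the six per-engine status words
 * at 0x002640 for a busy engine stuck on a context switch (the status
 * bits are only partially understood, hence the unk* names) and recover
 * the channel it is stuck on.
 */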
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        struct gf100_fifo_chan *chan;
        unsigned long flags;
        u32 engn;

        spin_lock_irqsave(&fifo->base.lock, flags);
        for (engn = 0; engn < 6; engn++) {
                u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
                u32 busy = (stat & 0x80000000);
                u32 save = (stat & 0x00100000); /* maybe? */
                u32 unk0 = (stat & 0x00040000);
                u32 unk1 = (stat & 0x00001000);
                u32 chid = (stat & 0x0000007f);
                (void)save;

                if (busy && unk0 && unk1) {
                        list_for_each_entry(chan, &fifo->chan, head) {
                                if (chan->base.chid == chid) {
                                        engine = gf100_fifo_id_engine(&fifo->base, engn);
                                        if (!engine)
                                                break;
                                        gf100_fifo_recover(fifo, engine, chan);
                                        break;
                                }
                        }
                }
        }
        spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00254c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en;

        en = nvkm_enum_find(gf100_fifo_sched_reason, code);

        nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

        switch (code) {
        case 0x0a:
                gf100_fifo_intr_sched_ctxsw(fifo);
                break;
        default:
                break;
        }
}

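/*
 * Decode a raw MMU fault from the per-unit registers at 0x002800 into a
 * nvkm_fault_data structure and hand it to the common fault path.
 */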
void
gf100_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
        u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
        u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
        u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
        struct nvkm_fault_data info;

        info.inst   =  (u64)inst << 12;
        info.addr   = ((u64)vahi << 32) | valo;
        info.time   = 0;
        info.engine = unit;
        info.valid  = 1;
        info.gpc    = (type & 0x1f000000) >> 24;
        info.client = (type & 0x00001f00) >> 8;
        info.access = (type & 0x00000080) >> 7;
        info.hub    = (type & 0x00000040) >> 6;
        info.reason = (type & 0x0000000f);

        nvkm_fifo_fault(fifo, &info);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*      { 0x00008000, "" }      seen with null ib push */
        { 0x00200000, "ILLEGAL_MTHD" },
        { 0x00800000, "EMPTY_SUBC" },
        {}
};

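/*
 * PBDMA interrupt: EMPTY_SUBC (bit 23) is first offered to the software
 * object's method handler, since SW-class methods surface here; anything
 * left over is logged before the method and interrupt status are acked.
 */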
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
        u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
        u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        u32 show = stat;
        char msg[128];

        if (stat & 0x00800000) {
                if (device->sw) {
                        if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
                                show &= ~0x00800000;
                }
        }

        if (show) {
                nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
                chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
                                   "subc %d mthd %04x data %08x\n",
                           unit, show, msg, chid, chan ? chan->inst->addr : 0,
                           chan ? chan->object.client->name : "unknown",
                           subc, mthd, data);
                nvkm_fifo_chan_put(&fifo->base, flags, &chan);
        }

        nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
        nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x002a00);

        if (intr & 0x10000000) {
                wake_up(&fifo->runlist.wait);
                nvkm_wr32(device, 0x002a00, 0x10000000);
                intr &= ~0x10000000;
        }

        if (intr) {
                nvkm_error(subdev, "RUNLIST %08x\n", intr);
                nvkm_wr32(device, 0x002a00, intr);
        }
}

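/*
 * Per-unit engine (non-stall) interrupts: bit 0 of each 4-bit group in
 * 0x0025a8 signals a channel event and is forwarded as a uevent; any
 * other asserted bits are unexpected and get masked off in 0x002628.
 */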
static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
        u32 inte = nvkm_rd32(device, 0x002628);
        u32 unkn;

        nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

        for (unkn = 0; unkn < 8; unkn++) {
                u32 ints = (intr >> (unkn * 0x04)) & inte;
                if (ints & 0x1) {
                        nvkm_fifo_uevent(&fifo->base);
                        ints &= ~1;
                }
                if (ints) {
                        nvkm_error(subdev, "ENGINE %d %d %01x",
                                   engn, unkn, ints);
                        nvkm_mask(device, 0x002628, ints, 0);
                }
        }
}

void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 mask = nvkm_rd32(device, 0x0025a4);
        while (mask) {
                u32 unit = __ffs(mask);
                gf100_fifo_intr_engine_unit(fifo, unit);
                mask &= ~(1 << unit);
        }
}

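/*
 * Top-level PFIFO interrupt dispatch.  Handled bits of 0x002100:
 * 0x00000100 scheduler error, 0x10000000 MMU fault, 0x20000000 PBDMA,
 * 0x40000000 runlist event, 0x80000000 engine (non-stall); the remaining
 * recognised bits are only reported.  Anything else is logged and masked
 * off to avoid an interrupt storm.
 */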
static void
gf100_fifo_intr(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x002140);
        u32 stat = nvkm_rd32(device, 0x002100) & mask;

        if (stat & 0x00000001) {
                u32 intr = nvkm_rd32(device, 0x00252c);
                nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x00000001);
                stat &= ~0x00000001;
        }

        if (stat & 0x00000100) {
                gf100_fifo_intr_sched(fifo);
                nvkm_wr32(device, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x00010000) {
                u32 intr = nvkm_rd32(device, 0x00256c);
                nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x00010000);
                stat &= ~0x00010000;
        }

        if (stat & 0x01000000) {
                u32 intr = nvkm_rd32(device, 0x00258c);
                nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x01000000);
                stat &= ~0x01000000;
        }

        if (stat & 0x10000000) {
                u32 mask = nvkm_rd32(device, 0x00259c);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gf100_fifo_intr_fault(&fifo->base, unit);
                        nvkm_wr32(device, 0x00259c, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 mask = nvkm_rd32(device, 0x0025a0);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gf100_fifo_intr_pbdma(fifo, unit);
                        nvkm_wr32(device, 0x0025a0, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                gf100_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }

        if (stat & 0x80000000) {
                gf100_fifo_intr_engine(fifo);
                stat &= ~0x80000000;
        }

        if (stat) {
                nvkm_error(subdev, "INTR %08x\n", stat);
                nvkm_mask(device, 0x002140, stat, 0x00000000);
                nvkm_wr32(device, 0x002100, stat);
        }
}

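/*
 * One-time setup: count the PBDMA units, allocate the two runlist
 * buffers, allocate a page of user area for each of the 128 channels,
 * and map that area through BAR1.
 */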
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
        int ret;

        /* Determine number of PBDMAs by checking valid enable bits. */
        nvkm_wr32(device, 0x002204, 0xffffffff);
        fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204));
        nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
                              false, &fifo->runlist.mem[0]);
        if (ret)
                return ret;

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
                              false, &fifo->runlist.mem[1]);
        if (ret)
                return ret;

        init_waitqueue_head(&fifo->runlist.wait);

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
                              0x1000, false, &fifo->user.mem);
        if (ret)
                return ret;

        ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
                           &fifo->user.bar);
        if (ret)
                return ret;

        return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}

static void
gf100_fifo_fini(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        flush_work(&fifo->recover.work);
}

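/*
 * Hardware init: enable the PBDMA units, program the fixed engine-to-PBDMA
 * routing, acknowledge and enable the per-PBDMA and global interrupts, and
 * point 0x002254 at the BAR1 address of the user area.
 */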
static void
gf100_fifo_init(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int i;

        /* Enable PBDMAs. */
        nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
        nvkm_wr32(device, 0x002204, (1 << fifo->pbdma_nr) - 1);

        /* Assign engines to PBDMAs. */
        if (fifo->pbdma_nr >= 3) {
                nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
                nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
                nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
                nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
                nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
                nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
        }

        /* PBDMA[n] */
        for (i = 0; i < fifo->pbdma_nr; i++) {
                nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
        }

        nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
        nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

        nvkm_wr32(device, 0x002100, 0xffffffff);
        nvkm_wr32(device, 0x002140, 0x7fffffff);
        nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}

static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
        nvkm_memory_unref(&fifo->user.mem);
        nvkm_memory_unref(&fifo->runlist.mem[0]);
        nvkm_memory_unref(&fifo->runlist.mem[1]);
        return fifo;
}

static const struct nvkm_fifo_func
gf100_fifo = {
        .dtor = gf100_fifo_dtor,
        .oneinit = gf100_fifo_oneinit,
        .init = gf100_fifo_init,
        .fini = gf100_fifo_fini,
        .intr = gf100_fifo_intr,
        .fault = gf100_fifo_fault,
        .engine_id = gf100_fifo_engine_id,
        .id_engine = gf100_fifo_id_engine,
        .uevent_init = gf100_fifo_uevent_init,
        .uevent_fini = gf100_fifo_uevent_fini,
        .chan = {
                &gf100_fifo_gpfifo_oclass,
                NULL
        },
};

int
gf100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_fifo **pfifo)
{
        struct gf100_fifo *fifo;

        if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
                return -ENOMEM;
        INIT_LIST_HEAD(&fifo->chan);
        INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
        *pfifo = &fifo->base;

        return nvkm_fifo_ctor(&gf100_fifo, device, type, inst, 128, &fifo->base);
}
 700