/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/top.h>
#include <engine/sw.h>

#include <nvif/class.h>

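/*
 * Enumerate the channel classes supported by this chip by walking the
 * NULL-terminated fifo->func->chan[] table.  Returns 0 and fills *psclass
 * when 'index' is valid; otherwise returns the total number of classes.
 */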
static int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
                     const struct nvkm_fifo_chan_oclass **psclass)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        int c = 0;

        while ((*psclass = fifo->func->chan[c])) {
                if (c++ == index)
                        return 0;
        }

        return c;
}

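/*
 * User-event (non-stall) interrupt enable.  Bit 31 of 0x002140 gates the
 * engine event interrupt that signals userspace waiters; judging by
 * gk104_fifo_intr() below, 0x002140 is the top-level interrupt enable mask
 * (the register naming here is inferred, not documented).
 */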
static void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

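/*
 * Rebuild and submit the hardware runlist for 'runl'.  The runlist is
 * double-buffered: an entry for every channel on the software list is
 * written into the inactive buffer, which is then submitted by programming
 * its address/target (0x002270) and entry count (0x002274).  Completion is
 * detected by polling the pending bit in 0x002284 + runl * 8, with the
 * runlist interrupt waking fifo->runlist[runl].wait.
 */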
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
{
        struct gk104_fifo_chan *chan;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_memory *mem;
        int nr = 0;
        int target;

        mutex_lock(&subdev->mutex);
        mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
        fifo->runlist[runl].next = !fifo->runlist[runl].next;

        nvkm_kmap(mem);
        list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
                nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid);
                nvkm_wo32(mem, (nr * 8) + 4, 0x00000000);
                nr++;
        }
        nvkm_done(mem);

        if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM)
                target = 0;
        else
                target = 3;

        nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
                                    (target << 28));
        nvkm_wr32(device, 0x002274, (runl << 20) | nr);

        if (wait_event_timeout(fifo->runlist[runl].wait,
                               !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
                                       & 0x00100000),
                               msecs_to_jiffies(2000)) == 0)
                nvkm_error(subdev, "runlist %d update timeout\n", runl);
        mutex_unlock(&subdev->mutex);
}

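/*
 * Add/remove a channel on the software-side runlist.  These only update
 * the per-runlist list under the subdev mutex; nothing reaches the
 * hardware until gk104_fifo_runlist_commit() is called.
 */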
void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
        mutex_lock(&fifo->base.engine.subdev.mutex);
        list_del_init(&chan->head);
        mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
        mutex_lock(&fifo->base.engine.subdev.mutex);
        list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
        mutex_unlock(&fifo->base.engine.subdev.mutex);
}

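/*
 * Deferred half of channel recovery, run from a workqueue.  Snapshot and
 * clear the pending engine/runlist masks under the fifo lock, stall
 * scheduling on the affected runlists (0x002630), reset each faulting
 * engine with a fini/init cycle, then resubmit the runlists and un-stall.
 */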
static void
gk104_fifo_recover_work(struct work_struct *w)
{
        struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        unsigned long flags;
        u32 engm, runm, todo;
        int engn, runl;

        spin_lock_irqsave(&fifo->base.lock, flags);
        runm = fifo->recover.runm;
        engm = fifo->recover.engm;
        fifo->recover.engm = 0;
        fifo->recover.runm = 0;
        spin_unlock_irqrestore(&fifo->base.lock, flags);

        nvkm_mask(device, 0x002630, runm, runm);

        for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
                if ((engine = fifo->engine[engn].engine)) {
                        nvkm_subdev_fini(&engine->subdev, false);
                        WARN_ON(nvkm_subdev_init(&engine->subdev));
                }
        }

        for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
                gk104_fifo_runlist_commit(fifo, runl);

        nvkm_wr32(device, 0x00262c, runm);
        nvkm_mask(device, 0x002630, runm, 0x00000000);
}

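/*
 * Begin recovery for a channel that faulted on 'engine': kick the channel
 * off the hardware (bit 11 of the per-channel control word at
 * 0x800004 + chid * 8 appears to force it off; the exact semantics are
 * inferred from this file's usage), mark it killed, and accumulate the
 * engine/runlist masks for the reset work above.  Caller must hold
 * fifo->base.lock.
 */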
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
                   struct gk104_fifo_chan *chan)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 chid = chan->base.chid;
        int engn;

        nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
                   nvkm_subdev_name[engine->subdev.index], chid);
        assert_spin_locked(&fifo->base.lock);

        nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
        list_del_init(&chan->head);
        chan->killed = true;

        for (engn = 0; engn < fifo->engine_nr; engn++) {
                if (fifo->engine[engn].engine == engine) {
                        fifo->recover.engm |= BIT(engn);
                        break;
                }
        }

        fifo->recover.runm |= BIT(chan->runl);
        schedule_work(&fifo->recover.work);
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
        { 0x01, "BIND_NOT_UNBOUND" },
        { 0x02, "SNOOP_WITHOUT_BAR1" },
        { 0x03, "UNBIND_WHILE_RUNNING" },
        { 0x05, "INVALID_RUNLIST" },
        { 0x06, "INVALID_CTX_TGT" },
        { 0x0b, "UNBIND_WHILE_PARKED" },
        {}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00252c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en =
                nvkm_enum_find(gk104_fifo_bind_reason, code);

        nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
        { 0x0a, "CTXSW_TIMEOUT" },
        {}
};

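/*
 * CTXSW_TIMEOUT handling: scan the per-engine status registers
 * (0x002640 + engn * 8), decode the busy/chsw/save/load bits and the
 * next/prev channel ids, and recover whichever channel is stuck mid
 * context switch on each busy engine.
 */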
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct gk104_fifo_chan *chan;
        unsigned long flags;
        u32 engn;

        spin_lock_irqsave(&fifo->base.lock, flags);
        for (engn = 0; engn < fifo->engine_nr; engn++) {
                struct nvkm_engine *engine = fifo->engine[engn].engine;
                int runl = fifo->engine[engn].runl;
                u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
                u32 busy = (stat & 0x80000000);
                u32 next = (stat & 0x0fff0000) >> 16;
                u32 chsw = (stat & 0x00008000);
                u32 save = (stat & 0x00004000);
                u32 load = (stat & 0x00002000);
                u32 prev = (stat & 0x00000fff);
                u32 chid = load ? next : prev;
                (void)save;

                if (!busy || !chsw)
                        continue;

                list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
                        if (chan->base.chid == chid && engine) {
                                gk104_fifo_recover(fifo, engine, chan);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&fifo->base.lock, flags);
}

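/*
 * SCHED_ERROR dispatch: log the error code, and for code 0x0a
 * (CTXSW_TIMEOUT) attempt active recovery.
 */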
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00254c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en =
                nvkm_enum_find(gk104_fifo_sched_reason, code);

        nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

        switch (code) {
        case 0x0a:
                gk104_fifo_intr_sched_ctxsw(fifo);
                break;
        default:
                break;
        }
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x00256c);
        nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
        nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x00259c);
        nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

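/*
 * Decode and report an MMU fault from fault unit 'unit'.  Each unit has a
 * four-register block at 0x002800 + unit * 0x10 holding the faulting
 * instance, the low/high halves of the virtual address, and a status word
 * with GPC/client/hub/reason fields.  For units that map to BAR, instmem
 * or IFB, the zero-mask read-modify-write appears to re-trigger the
 * relevant flush; otherwise the owning engine is looked up (via the fault
 * tables or the TOP subdev) and the faulting channel is recovered.
 */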
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
        u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
        u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
        u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
        u32 gpc    = (stat & 0x1f000000) >> 24;
        u32 client = (stat & 0x00001f00) >> 8;
        u32 write  = (stat & 0x00000080);
        u32 hub    = (stat & 0x00000040);
        u32 reason = (stat & 0x0000000f);
        const struct nvkm_enum *er, *eu, *ec;
        struct nvkm_engine *engine = NULL;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char gpcid[8] = "", en[16] = "";

        er = nvkm_enum_find(fifo->func->fault.reason, reason);
        eu = nvkm_enum_find(fifo->func->fault.engine, unit);
        if (hub) {
                ec = nvkm_enum_find(fifo->func->fault.hubclient, client);
        } else {
                ec = nvkm_enum_find(fifo->func->fault.gpcclient, client);
                snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
        }

        if (eu && eu->data2) {
                switch (eu->data2) {
                case NVKM_SUBDEV_BAR:
                        nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
                        break;
                case NVKM_SUBDEV_INSTMEM:
                        nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
                        break;
                case NVKM_ENGINE_IFB:
                        nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
                        break;
                default:
                        engine = nvkm_device_engine(device, eu->data2);
                        break;
                }
        }

        if (eu == NULL) {
                enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
                if (engidx < NVKM_SUBDEV_NR) {
                        const char *src = nvkm_subdev_name[engidx];
                        char *dst = en;
                        do {
                                *dst++ = toupper(*src++);
                        } while (*src);
                        engine = nvkm_device_engine(device, engidx);
                }
        } else {
                snprintf(en, sizeof(en), "%s", eu->name);
        }

        chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

        nvkm_error(subdev,
                   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
                   "reason %02x [%s] on channel %d [%010llx %s]\n",
                   write ? "write" : "read", (u64)vahi << 32 | valo,
                   unit, en, client, gpcid, ec ? ec->name : "",
                   reason, er ? er->name : "", chan ? chan->chid : -1,
                   (u64)inst << 12,
                   chan ? chan->object.client->name : "unknown");

        if (engine && chan)
                gk104_fifo_recover(fifo, engine, (void *)chan);
        nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
        { 0x00000001, "MEMREQ" },
        { 0x00000002, "MEMACK_TIMEOUT" },
        { 0x00000004, "MEMACK_EXTRA" },
        { 0x00000008, "MEMDAT_TIMEOUT" },
        { 0x00000010, "MEMDAT_EXTRA" },
        { 0x00000020, "MEMFLUSH" },
        { 0x00000040, "MEMOP" },
        { 0x00000080, "LBCONNECT" },
        { 0x00000100, "LBREQ" },
        { 0x00000200, "LBACK_TIMEOUT" },
        { 0x00000400, "LBACK_EXTRA" },
        { 0x00000800, "LBDAT_TIMEOUT" },
        { 0x00001000, "LBDAT_EXTRA" },
        { 0x00002000, "GPFIFO" },
        { 0x00004000, "GPPTR" },
        { 0x00008000, "GPENTRY" },
        { 0x00010000, "GPCRC" },
        { 0x00020000, "PBPTR" },
        { 0x00040000, "PBENTRY" },
        { 0x00080000, "PBCRC" },
        { 0x00100000, "XBARCONNECT" },
        { 0x00200000, "METHOD" },
        { 0x00400000, "METHODCRC" },
        { 0x00800000, "DEVICE" },
        { 0x02000000, "SEMAPHORE" },
        { 0x04000000, "ACQUIRE" },
        { 0x08000000, "PRI" },
        { 0x20000000, "NO_CTXSW_SEG" },
        { 0x40000000, "PBSEG" },
        { 0x80000000, "SIGNATURE" },
        {}
};

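/*
 * PBDMA interrupt handling, first status word (0x040108, masked by
 * 0x04010c, one 0x2000-stride register block per PBDMA).  DEVICE
 * interrupts (0x00800000) carry software methods, which are handed to the
 * SW engine and suppressed from the log when it claims them.
 */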
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
        u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
        u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
        u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);
        u32 show = stat;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char msg[128];

        if (stat & 0x00800000) {
                if (device->sw) {
                        if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
                                show &= ~0x00800000;
                }
        }

        nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);

        if (show) {
                nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
                chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
                                   "subc %d mthd %04x data %08x\n",
                           unit, show, msg, chid, chan ? chan->inst->addr : 0,
                           chan ? chan->object.client->name : "unknown",
                           subc, mthd, data);
                nvkm_fifo_chan_put(&fifo->base, flags, &chan);
        }

        nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
        { 0x00000001, "HCE_RE_ILLEGAL_OP" },
        { 0x00000002, "HCE_RE_ALIGNB" },
        { 0x00000004, "HCE_PRIV" },
        { 0x00000008, "HCE_ILLEGAL_MTHD" },
        { 0x00000010, "HCE_ILLEGAL_CLASS" },
        {}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
        u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
        char msg[128];

        if (stat) {
                nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
                           unit, stat, msg, chid,
                           nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
                           nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
        }

        nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 mask = nvkm_rd32(device, 0x002a00);
        while (mask) {
                int runl = __ffs(mask);
                wake_up(&fifo->runlist[runl].wait);
                nvkm_wr32(device, 0x002a00, 1 << runl);
                mask &= ~(1 << runl);
        }
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
        nvkm_fifo_uevent(&fifo->base);
}

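/*
 * Top-level FIFO interrupt handler: read the status (0x002100) masked by
 * the enable mask (0x002140), service each source, and ack it back into
 * 0x002100.  Anything left unhandled is reported once and then masked off
 * so it cannot storm.
 */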
static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x002140);
        u32 stat = nvkm_rd32(device, 0x002100) & mask;

        if (stat & 0x00000001) {
                gk104_fifo_intr_bind(fifo);
                nvkm_wr32(device, 0x002100, 0x00000001);
                stat &= ~0x00000001;
        }

        if (stat & 0x00000010) {
                nvkm_error(subdev, "PIO_ERROR\n");
                nvkm_wr32(device, 0x002100, 0x00000010);
                stat &= ~0x00000010;
        }

        if (stat & 0x00000100) {
                gk104_fifo_intr_sched(fifo);
                nvkm_wr32(device, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x00010000) {
                gk104_fifo_intr_chsw(fifo);
                nvkm_wr32(device, 0x002100, 0x00010000);
                stat &= ~0x00010000;
        }

        if (stat & 0x00800000) {
                nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
                nvkm_wr32(device, 0x002100, 0x00800000);
                stat &= ~0x00800000;
        }

        if (stat & 0x01000000) {
                nvkm_error(subdev, "LB_ERROR\n");
                nvkm_wr32(device, 0x002100, 0x01000000);
                stat &= ~0x01000000;
        }

        if (stat & 0x08000000) {
                gk104_fifo_intr_dropped_fault(fifo);
                nvkm_wr32(device, 0x002100, 0x08000000);
                stat &= ~0x08000000;
        }

        if (stat & 0x10000000) {
                u32 mask = nvkm_rd32(device, 0x00259c);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gk104_fifo_intr_fault(fifo, unit);
                        nvkm_wr32(device, 0x00259c, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 mask = nvkm_rd32(device, 0x0025a0);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gk104_fifo_intr_pbdma_0(fifo, unit);
                        gk104_fifo_intr_pbdma_1(fifo, unit);
                        nvkm_wr32(device, 0x0025a0, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                gk104_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }

        if (stat & 0x80000000) {
                nvkm_wr32(device, 0x002100, 0x80000000);
                gk104_fifo_intr_engine(fifo);
                stat &= ~0x80000000;
        }

        if (stat) {
                nvkm_error(subdev, "INTR %08x\n", stat);
                nvkm_mask(device, 0x002140, stat, 0x00000000);
                nvkm_wr32(device, 0x002100, stat);
        }
}

static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        flush_work(&fifo->recover.work);
        /* allow mmu fault interrupts, even when we're not using fifo */
        nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

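/*
 * One-time setup.  The PBDMA count is probed by writing all-ones to the
 * unit enable mask (0x000204) and counting the bits that stick; the
 * PBDMA-to-runlist mapping is read back from 0x002390 + i * 4.  Engine and
 * runlist topology comes from the TOP subdev.  Each runlist gets a pair of
 * buffers for the double-buffering in gk104_fifo_runlist_commit(), and a
 * user-visible channel area (0x200 bytes per channel, commonly called
 * USERD) is allocated and mapped through BAR1.
 */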
static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int engn, runl, pbid, ret, i, j;
        enum nvkm_devidx engidx;
        u32 *map;

        /* Determine number of PBDMAs by checking valid enable bits. */
        nvkm_wr32(device, 0x000204, 0xffffffff);
        fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
        nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

        /* Read PBDMA->runlist(s) mapping from HW. */
        if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
                return -ENOMEM;

        for (i = 0; i < fifo->pbdma_nr; i++)
                map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

        /* Determine runlist configuration from topology device info. */
        i = 0;
        while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
                /* Determine which PBDMA handles requests for this engine. */
                for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
                        if (map[j] & (1 << runl)) {
                                pbid = j;
                                break;
                        }
                }

                nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
                           engn, runl, pbid, nvkm_subdev_name[engidx]);

                fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
                fifo->engine[engn].runl = runl;
                fifo->engine[engn].pbid = pbid;
                fifo->engine_nr = max(fifo->engine_nr, engn + 1);
                fifo->runlist[runl].engm |= 1 << engn;
                fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
        }

        kfree(map);

        for (i = 0; i < fifo->runlist_nr; i++) {
                ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                      0x8000, 0x1000, false,
                                      &fifo->runlist[i].mem[0]);
                if (ret)
                        return ret;

                ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                      0x8000, 0x1000, false,
                                      &fifo->runlist[i].mem[1]);
                if (ret)
                        return ret;

                init_waitqueue_head(&fifo->runlist[i].wait);
                INIT_LIST_HEAD(&fifo->runlist[i].chan);
        }

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                              fifo->base.nr * 0x200, 0x1000, true,
                              &fifo->user.mem);
        if (ret)
                return ret;

        ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
                            &fifo->user.bar);
        if (ret)
                return ret;

        nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
        return 0;
}

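/*
 * Runtime init: enable all discovered PBDMAs, clear and unmask their
 * interrupts, point the hardware at the user-area BAR mapping (0x002254),
 * then clear and enable the top-level FIFO interrupts.
 */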
static void
gk104_fifo_init(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int i;

        /* Enable PBDMAs. */
        nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);

        /* PBDMA[n] */
        for (i = 0; i < fifo->pbdma_nr; i++) {
                nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
        }

        /* PBDMA[n].HCE */
        for (i = 0; i < fifo->pbdma_nr; i++) {
                nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
        }

        nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

        nvkm_wr32(device, 0x002100, 0xffffffff);
        nvkm_wr32(device, 0x002140, 0x7fffffff);
}

static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        int i;

        nvkm_vm_put(&fifo->user.bar);
        nvkm_memory_del(&fifo->user.mem);

        for (i = 0; i < fifo->runlist_nr; i++) {
                nvkm_memory_del(&fifo->runlist[i].mem[1]);
                nvkm_memory_del(&fifo->runlist[i].mem[0]);
        }

        return fifo;
}

static const struct nvkm_fifo_func
gk104_fifo_ = {
        .dtor = gk104_fifo_dtor,
        .oneinit = gk104_fifo_oneinit,
        .init = gk104_fifo_init,
        .fini = gk104_fifo_fini,
        .intr = gk104_fifo_intr,
        .uevent_init = gk104_fifo_uevent_init,
        .uevent_fini = gk104_fifo_uevent_fini,
        .class_get = gk104_fifo_class_get,
};

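/*
 * Common constructor shared by all GK104-style FIFO implementations; the
 * per-chip 'func' supplies the fault decode tables and channel classes.
 */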
int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
                int index, int nr, struct nvkm_fifo **pfifo)
{
        struct gk104_fifo *fifo;

        if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
                return -ENOMEM;
        fifo->func = func;
        INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
        *pfifo = &fifo->base;

        return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
}

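/*
 * Decode tables for the MMU fault status fields used by
 * gk104_fifo_intr_fault().  Entries carrying a data2 value tie a fault
 * unit back to the engine/subdev used for recovery.
 */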
const struct nvkm_enum
gk104_fifo_fault_engine[] = {
        { 0x00, "GR", NULL, NVKM_ENGINE_GR },
        { 0x01, "DISPLAY" },
        { 0x02, "CAPTURE" },
        { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
        { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
        { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
        { 0x06, "SCHED" },
        { 0x07, "HOST0" },
        { 0x08, "HOST1" },
        { 0x09, "HOST2" },
        { 0x0a, "HOST3" },
        { 0x0b, "HOST4" },
        { 0x0c, "HOST5" },
        { 0x0d, "HOST6" },
        { 0x0e, "HOST7" },
        { 0x0f, "HOSTSR" },
        { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
        { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
        { 0x13, "PERF" },
        { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
        { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
        { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
        { 0x17, "PMU" },
        { 0x18, "PTP" },
        { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
        { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
        {}
};

const struct nvkm_enum
gk104_fifo_fault_reason[] = {
        { 0x00, "PDE" },
        { 0x01, "PDE_SIZE" },
        { 0x02, "PTE" },
        { 0x03, "VA_LIMIT_VIOLATION" },
        { 0x04, "UNBOUND_INST_BLOCK" },
        { 0x05, "PRIV_VIOLATION" },
        { 0x06, "RO_VIOLATION" },
        { 0x07, "WO_VIOLATION" },
        { 0x08, "PITCH_MASK_VIOLATION" },
        { 0x09, "WORK_CREATION" },
        { 0x0a, "UNSUPPORTED_APERTURE" },
        { 0x0b, "COMPRESSION_FAILURE" },
        { 0x0c, "UNSUPPORTED_KIND" },
        { 0x0d, "REGION_VIOLATION" },
        { 0x0e, "BOTH_PTES_VALID" },
        { 0x0f, "INFO_TYPE_POISONED" },
        {}
};

const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
        { 0x00, "VIP" },
        { 0x01, "CE0" },
        { 0x02, "CE1" },
        { 0x03, "DNISO" },
        { 0x04, "FE" },
        { 0x05, "FECS" },
        { 0x06, "HOST" },
        { 0x07, "HOST_CPU" },
        { 0x08, "HOST_CPU_NB" },
        { 0x09, "ISO" },
        { 0x0a, "MMU" },
        { 0x0b, "MSPDEC" },
        { 0x0c, "MSPPP" },
        { 0x0d, "MSVLD" },
        { 0x0e, "NISO" },
        { 0x0f, "P2P" },
        { 0x10, "PD" },
        { 0x11, "PERF" },
        { 0x12, "PMU" },
        { 0x13, "RASTERTWOD" },
        { 0x14, "SCC" },
        { 0x15, "SCC_NB" },
        { 0x16, "SEC" },
        { 0x17, "SSYNC" },
        { 0x18, "GR_CE" },
        { 0x19, "CE2" },
        { 0x1a, "XV" },
        { 0x1b, "MMU_NB" },
        { 0x1c, "MSENC" },
        { 0x1d, "DFALCON" },
        { 0x1e, "SKED" },
        { 0x1f, "AFALCON" },
        {}
};

const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
        { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
        { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
        { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
        { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
        { 0x0c, "RAST" },
        { 0x0d, "GCC" },
        { 0x0e, "GPCCS" },
        { 0x0f, "PROP_0" },
        { 0x10, "PROP_1" },
        { 0x11, "PROP_2" },
        { 0x12, "PROP_3" },
        { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
        { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
        { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
        { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
        { 0x1f, "GPM" },
        { 0x20, "LTP_UTLB_0" },
        { 0x21, "LTP_UTLB_1" },
        { 0x22, "LTP_UTLB_2" },
        { 0x23, "LTP_UTLB_3" },
        { 0x24, "GPC_RGG_UTLB" },
        {}
};

static const struct gk104_fifo_func
gk104_fifo = {
        .fault.engine = gk104_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
        .fault.hubclient = gk104_fifo_fault_hubclient,
        .fault.gpcclient = gk104_fifo_fault_gpcclient,
        .chan = {
                &gk104_fifo_gpfifo_oclass,
                NULL
        },
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
        return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}