linux/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv40.h"
#include "regs.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

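/* 0x001540 is understood (from reverse engineering) to hold the mask of
 * enabled graphics units on NV4x; the raw value is reported to userspace
 * as-is (e.g. via NOUVEAU_GETPARAM_GRAPH_UNITS).
 */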
u64
nv40_gr_units(struct nvkm_gr *gr)
{
        return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

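/* Bind a graphics object into instance memory.  The 20-byte layout below
 * (class ID in the first word, remaining words zeroed) follows nouveau's
 * reverse-engineered NV40 object format.
 */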
static int
nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
                    int align, struct nvkm_gpuobj **pgpuobj)
{
        int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
                                  false, parent, pgpuobj);
        if (ret == 0) {
                nvkm_kmap(*pgpuobj);
                nvkm_wo32(*pgpuobj, 0x00, object->oclass);
                nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
                nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
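                /* On big-endian hosts, set what nouveau understands to be
                 * the object's byte-swap flag (bit 24 of the third word).
                 */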
#ifdef __BIG_ENDIAN
                nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
#endif
                nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
                nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
                nvkm_done(*pgpuobj);
        }
        return ret;
}

const struct nvkm_object_func
nv40_gr_object = {
        .bind = nv40_gr_object_bind,
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

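/* Allocate this channel's PGRAPH context image (gr->size bytes, as sized
 * by nv40_grctx_init() at engine init), fill in default state, and write
 * the image's own instance address at offset 0 -- a self-reference the
 * context program appears to rely on when switching channels.
 */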
static int
nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
                  int align, struct nvkm_gpuobj **pgpuobj)
{
        struct nv40_gr_chan *chan = nv40_gr_chan(object);
        struct nv40_gr *gr = chan->gr;
        int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
                                  align, true, parent, pgpuobj);
        if (ret == 0) {
                chan->inst = (*pgpuobj)->addr;
                nvkm_kmap(*pgpuobj);
                nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
                nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
                nvkm_done(*pgpuobj);
        }
        return ret;
}

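/* Evict this channel's context from PGRAPH on channel teardown/suspend.
 * Register offsets and semantics below follow nouveau's reverse
 * engineering of the NV40 context-switch hardware.
 */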
static int
nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
        struct nv40_gr_chan *chan = nv40_gr_chan(object);
        struct nv40_gr *gr = chan->gr;
        struct nvkm_subdev *subdev = &gr->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 inst = 0x01000000 | chan->inst >> 4;
        int ret = 0;

        nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);

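        /* If this channel's context is the one currently resident
         * (0x40032c), force the ctxprog to save it out on suspend and
         * give it 2ms to complete before declaring the engine wedged.
         */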
        if (nvkm_rd32(device, 0x40032c) == inst) {
                if (suspend) {
                        nvkm_wr32(device, 0x400720, 0x00000000);
                        nvkm_wr32(device, 0x400784, inst);
                        nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
                        nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
                        if (nvkm_msec(device, 2000,
                                if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
                                        break;
                        ) < 0) {
                                u32 insn = nvkm_rd32(device, 0x400308);
                                nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
                                ret = -EBUSY;
                        }
                }

                nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
        }

        if (nvkm_rd32(device, 0x400330) == inst)
                nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);

        nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
        return ret;
}

static void *
nv40_gr_chan_dtor(struct nvkm_object *object)
{
        struct nv40_gr_chan *chan = nv40_gr_chan(object);
        unsigned long flags;
        spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
        list_del(&chan->head);
        spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
        return chan;
}

static const struct nvkm_object_func
nv40_gr_chan = {
        .dtor = nv40_gr_chan_dtor,
        .fini = nv40_gr_chan_fini,
        .bind = nv40_gr_chan_bind,
};

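/* Create a PGRAPH channel context and track it on gr->chan, so the
 * interrupt handler can map a trapping instance address back to its
 * owning channel.
 */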
int
nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
        struct nv40_gr *gr = nv40_gr(base);
        struct nv40_gr_chan *chan;
        unsigned long flags;

        if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
        chan->gr = gr;
        chan->fifo = fifoch;
        *pobject = &chan->object;

        spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
        list_add(&chan->head, &gr->chan);
        spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
        return 0;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

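/* Propagate an updated tile region into PGRAPH.  The per-chipset register
 * layouts below (NV20-, NV41- and NV47-style) come from nouveau's reverse
 * engineering of the various NV4x families.
 */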
static void
nv40_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
{
        struct nv40_gr *gr = nv40_gr(base);
        struct nvkm_device *device = gr->base.engine.subdev.device;
        struct nvkm_fifo *fifo = device->fifo;
        unsigned long flags;

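        /* Tile setup can't safely change beneath in-flight work: pause
         * PFIFO and wait for PGRAPH to idle before touching anything.
         */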
        nvkm_fifo_pause(fifo, &flags);
        nv04_gr_idle(&gr->base);

        switch (device->chipset) {
        case 0x40:
        case 0x41:
        case 0x42:
        case 0x43:
        case 0x45:
                nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
                nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
                nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
                nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
                nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
                nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
                switch (device->chipset) {
                case 0x40:
                case 0x45:
                        nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
                        nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
                        break;
                case 0x41:
                case 0x42:
                case 0x43:
                        nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
                        nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
                        break;
                default:
                        break;
                }
                break;
        case 0x47:
        case 0x49:
        case 0x4b:
                nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
                nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
                nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
                nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
                nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
                nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
                nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
                nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
                break;
        default:
                WARN_ON(1);
                break;
        }

        nvkm_fifo_start(fifo, &flags);
}

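/* PGRAPH interrupt: decode the trapped subchannel/method/data, apply the
 * DMA_VTX_PROTECTION workaround, then acknowledge and log the event.
 */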
void
nv40_gr_intr(struct nvkm_gr *base)
{
        struct nv40_gr *gr = nv40_gr(base);
        struct nv40_gr_chan *temp, *chan = NULL;
        struct nvkm_subdev *subdev = &gr->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
        u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
        u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
        u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
        u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00001ffc);
        u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
        u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
        u32 show = stat;
        char msg[128], src[128], sta[128];
        unsigned long flags;

        spin_lock_irqsave(&gr->base.engine.lock, flags);
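        /* Find the channel owning the trapping context and move it to the
         * head of the list, so repeat offenders are found quickly.
         */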
        list_for_each_entry(temp, &gr->chan, head) {
                if (temp->inst >> 4 == inst) {
                        chan = temp;
                        list_del(&chan->head);
                        list_add(&chan->head, &gr->chan);
                        break;
                }
        }

        if (stat & NV_PGRAPH_INTR_ERROR) {
                if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
                        nvkm_mask(device, 0x402000, 0, 0);
                }
        }

        nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
        nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

        if (show) {
                nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
                nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
                nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
                nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
                                   "nstatus %08x [%s] ch %d [%08x %s] subc %d "
                                   "class %04x mthd %04x data %08x\n",
                           show, msg, nsource, src, nstatus, sta,
                           chan ? chan->fifo->chid : -1, inst << 4,
                           chan ? chan->fifo->object.client->name : "unknown",
                           subc, class, mthd, data);
        }

        spin_unlock_irqrestore(&gr->base.engine.lock, flags);
}

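/* Engine init: generate and upload the context program, reset interrupt
 * state, then apply per-chipset magic register settings inherited from
 * reverse engineering of the binary driver.
 */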
int
nv40_gr_init(struct nvkm_gr *base)
{
        struct nv40_gr *gr = nv40_gr(base);
        struct nvkm_device *device = gr->base.engine.subdev.device;
        int ret, i, j;
        u32 vramsz;

        /* generate and upload context program */
        ret = nv40_grctx_init(device, &gr->size);
        if (ret)
                return ret;

        /* No context present currently */
        nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

        nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
        nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

        nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
        nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
        nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
        nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
        nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
        nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

        nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
        nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

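        /* The low byte of 0x1540 is believed to be a mask of usable
         * units; program the index of its lowest set bit into 0x405000.
         */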
        j = nvkm_rd32(device, 0x1540) & 0xff;
        if (j) {
                for (i = 0; !(j & 1); j >>= 1, i++)
                        ;
                nvkm_wr32(device, 0x405000, i);
        }

        if (device->chipset == 0x40) {
                nvkm_wr32(device, 0x4009b0, 0x83280fff);
                nvkm_wr32(device, 0x4009b4, 0x000000a0);
        } else {
                nvkm_wr32(device, 0x400820, 0x83280eff);
                nvkm_wr32(device, 0x400824, 0x000000a0);
        }

        switch (device->chipset) {
        case 0x40:
        case 0x45:
                nvkm_wr32(device, 0x4009b8, 0x0078e366);
                nvkm_wr32(device, 0x4009bc, 0x0000014c);
                break;
        case 0x41:
        case 0x42: /* pciid also 0x00Cx */
        /* case 0x0120: XXX (pciid) */
                nvkm_wr32(device, 0x400828, 0x007596ff);
                nvkm_wr32(device, 0x40082c, 0x00000108);
                break;
        case 0x43:
                nvkm_wr32(device, 0x400828, 0x0072cb77);
                nvkm_wr32(device, 0x40082c, 0x00000108);
                break;
        case 0x44:
        case 0x46: /* G72 */
        case 0x4a:
        case 0x4c: /* G7x-based C51 */
        case 0x4e:
                nvkm_wr32(device, 0x400860, 0);
                nvkm_wr32(device, 0x400864, 0);
                break;
        case 0x47: /* G70 */
        case 0x49: /* G71 */
        case 0x4b: /* G73 */
                nvkm_wr32(device, 0x400828, 0x07830610);
                nvkm_wr32(device, 0x40082c, 0x0000016A);
                break;
        default:
                break;
        }

        nvkm_wr32(device, 0x400b38, 0x2ffff800);
        nvkm_wr32(device, 0x400b3c, 0x00006000);

        /* Tiling related stuff. */
        switch (device->chipset) {
        case 0x44:
        case 0x4a:
                nvkm_wr32(device, 0x400bc4, 0x1003d888);
                nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
                break;
        case 0x46:
                nvkm_wr32(device, 0x400bc4, 0x0000e024);
                nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
                break;
        case 0x4c:
        case 0x4e:
        case 0x67:
                nvkm_wr32(device, 0x400bc4, 0x1003d888);
                nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
                break;
        default:
                break;
        }

        /* begin RAM config */
        vramsz = device->func->resource_size(device, 1) - 1;
        switch (device->chipset) {
        case 0x40:
                nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
                nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
                nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
                nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
                nvkm_wr32(device, 0x400820, 0);
                nvkm_wr32(device, 0x400824, 0);
                nvkm_wr32(device, 0x400864, vramsz);
                nvkm_wr32(device, 0x400868, vramsz);
                break;
        default:
                switch (device->chipset) {
                case 0x41:
                case 0x42:
                case 0x43:
                case 0x45:
                case 0x4e:
                case 0x44:
                case 0x4a:
                        nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
                        nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
                        break;
                default:
                        nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
                        nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
                        break;
                }
                nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
                nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
                nvkm_wr32(device, 0x400840, 0);
                nvkm_wr32(device, 0x400844, 0);
                nvkm_wr32(device, 0x4008A0, vramsz);
                nvkm_wr32(device, 0x4008A4, vramsz);
                break;
        }

        return 0;
}

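/* Common constructor shared by the NV4x graphics engine variants:
 * allocate the nv40_gr wrapper and initialise the channel list before
 * registering with the core.
 */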
int
nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
             enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
        struct nv40_gr *gr;

        if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
                return -ENOMEM;
        *pgr = &gr->base;
        INIT_LIST_HEAD(&gr->chan);

        return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
}

static const struct nvkm_gr_func
nv40_gr = {
        .init = nv40_gr_init,
        .intr = nv40_gr_intr,
        .tile = nv40_gr_tile,
        .units = nv40_gr_units,
        .chan_new = nv40_gr_chan_new,
        .sclass = {
                { -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
                { -1, -1, 0x0019, &nv40_gr_object }, /* clip */
                { -1, -1, 0x0030, &nv40_gr_object }, /* null */
                { -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
                { -1, -1, 0x0043, &nv40_gr_object }, /* rop */
                { -1, -1, 0x0044, &nv40_gr_object }, /* patt */
                { -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
                { -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
                { -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
                { -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
                { -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
                { -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
                { -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
                { -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
                { -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
                { -1, -1, 0x4097, &nv40_gr_object }, /* curie */
                {}
        }
};

int
nv40_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
        return nv40_gr_new_(&nv40_gr, device, type, inst, pgr);
}