/* linux/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c */
   1/*
   2 * Copyright 2012 Red Hat Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Ben Skeggs
  23 */
  24#include "nv50.h"
  25#include "head.h"
  26#include "ior.h"
  27#include "rootnv50.h"
  28
  29void
  30gf119_disp_super(struct work_struct *work)
  31{
  32        struct nv50_disp *disp =
  33                container_of(work, struct nv50_disp, supervisor);
  34        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
  35        struct nvkm_device *device = subdev->device;
  36        struct nvkm_head *head;
  37        u32 mask[4];
  38
  39        nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
  40        list_for_each_entry(head, &disp->base.head, head) {
  41                mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
  42                HEAD_DBG(head, "%08x", mask[head->id]);
  43        }
  44
  45        if (disp->super & 0x00000001) {
  46                nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
  47                nv50_disp_super_1(disp);
  48                list_for_each_entry(head, &disp->base.head, head) {
  49                        if (!(mask[head->id] & 0x00001000))
  50                                continue;
  51                        nv50_disp_super_1_0(disp, head);
  52                }
  53        } else
  54        if (disp->super & 0x00000002) {
  55                list_for_each_entry(head, &disp->base.head, head) {
  56                        if (!(mask[head->id] & 0x00001000))
  57                                continue;
  58                        nv50_disp_super_2_0(disp, head);
  59                }
  60                nvkm_outp_route(&disp->base);
  61                list_for_each_entry(head, &disp->base.head, head) {
  62                        if (!(mask[head->id] & 0x00010000))
  63                                continue;
  64                        nv50_disp_super_2_1(disp, head);
  65                }
  66                list_for_each_entry(head, &disp->base.head, head) {
  67                        if (!(mask[head->id] & 0x00001000))
  68                                continue;
  69                        nv50_disp_super_2_2(disp, head);
  70                }
  71        } else
  72        if (disp->super & 0x00000004) {
  73                list_for_each_entry(head, &disp->base.head, head) {
  74                        if (!(mask[head->id] & 0x00001000))
  75                                continue;
  76                        nv50_disp_super_3_0(disp, head);
  77                }
  78        }
  79
  80        list_for_each_entry(head, &disp->base.head, head)
  81                nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);
  82        nvkm_wr32(device, 0x6101d0, 0x80000000);
  83}
  84
  85void
  86gf119_disp_intr_error(struct nv50_disp *disp, int chid)
  87{
  88        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
  89        struct nvkm_device *device = subdev->device;
  90        u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
  91        u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
  92        u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));
  93
  94        nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
  95                   chid, (mthd & 0x0000ffc), data, mthd, unkn);
  96
  97        if (chid < ARRAY_SIZE(disp->chan)) {
  98                switch (mthd & 0xffc) {
  99                case 0x0080:
 100                        nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
 101                        break;
 102                default:
 103                        break;
 104                }
 105        }
 106
 107        nvkm_wr32(device, 0x61009c, (1 << chid));
 108        nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
 109}
 110
/* Top-level display interrupt dispatcher.  Reads the summary register
 * (0x610088) and handles each asserted source in turn; ack ordering
 * relative to the status reads below follows the hardware's expectations,
 * so do not reorder these register accesses.
 */
void
gf119_disp_intr(struct nv50_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 intr = nvkm_rd32(device, 0x610088);

	/* Bit 0: channel completion notifications (0x61008c is a bitmask of
	 * pending channels; each bit is acked by writing it back).
	 */
	if (intr & 0x00000001) {
		u32 stat = nvkm_rd32(device, 0x61008c);
		while (stat) {
			int chid = __ffs(stat); stat &= ~(1 << chid);
			nv50_disp_chan_uevent_send(disp, chid);
			nvkm_wr32(device, 0x61008c, 1 << chid);
		}
		intr &= ~0x00000001;
	}

	/* Bit 1: channel error.  Only the lowest pending channel is handled
	 * per interrupt; intr_error() acks it, and any remaining channels
	 * will re-assert the interrupt.
	 */
	if (intr & 0x00000002) {
		u32 stat = nvkm_rd32(device, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0)
			disp->func->intr_error(disp, chid);
		intr &= ~0x00000002;
	}

	/* Bit 20: supervisor request.  The low three bits of 0x6100ac select
	 * the supervisor stage; actual processing is deferred to the
	 * supervisor workqueue, with the ack written here.
	 */
	if (intr & 0x00100000) {
		u32 stat = nvkm_rd32(device, 0x6100ac);
		if (stat & 0x00000007) {
			disp->super = (stat & 0x00000007);
			queue_work(disp->wq, &disp->supervisor);
			nvkm_wr32(device, 0x6100ac, disp->super);
			stat &= ~0x00000007;
		}

		/* Unexpected bits: log and ack so we don't interrupt-storm. */
		if (stat) {
			nvkm_warn(subdev, "intr24 %08x\n", stat);
			nvkm_wr32(device, 0x6100ac, stat);
		}

		intr &= ~0x00100000;
	}

	/* Bits 24+: one per head.  Bit 0 of the head's status register
	 * (0x6100bc) signals vblank; the mask-with-zero write and the
	 * trailing read of 0x6100c0 ack/flush the head's interrupt state.
	 */
	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 0x800;
		u32 mask = 0x01000000 << head->id;
		if (mask & intr) {
			u32 stat = nvkm_rd32(device, 0x6100bc + hoff);
			if (stat & 0x00000001)
				nvkm_disp_vblank(&disp->base, head->id);
			nvkm_mask(device, 0x6100bc + hoff, 0, 0);
			nvkm_rd32(device, 0x6100c0 + hoff);
		}
	}
}
 166
 167int
 168gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
 169                int index, struct nvkm_disp **pdisp)
 170{
 171        u32 heads = nvkm_rd32(device, 0x022448);
 172        return nv50_disp_new_(func, device, index, heads, pdisp);
 173}
 174
/* GF119 display engine implementation: interrupt/supervisor hooks plus
 * the object classes and output constructors for this generation
 * (3 DACs, 4 SORs).
 */
static const struct nv50_disp_func
gf119_disp = {
	.intr = gf119_disp_intr,
	.intr_error = gf119_disp_intr_error,
	.uevent = &gf119_disp_chan_uevent,
	.super = gf119_disp_super,
	.root = &gf119_disp_root_oclass,
	.head.new = gf119_head_new,
	.dac = { .nr = 3, .new = gf119_dac_new },
	.sor = { .nr = 4, .new = gf119_sor_new },
};
 186
 187int
 188gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 189{
 190        return gf119_disp_new_(&gf119_disp, device, index, pdisp);
 191}
 192