linux/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "dmacnv50.h"
#include "rootnv50.h"

#include <core/client.h>
#include <core/oproxy.h>
#include <core/ramht.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/dma.h>

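/* Per-child state: remembers the RAMHT hash returned by the channel's
 * bind() so the entry can be removed again when the child is destroyed.
 */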
struct nv50_disp_dmac_object {
        struct nvkm_oproxy oproxy;
        struct nv50_disp_root *root;
        int hash;
};

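/* Proxy destructor for a child DMA object: drop its entry from the
 * display RAMHT.
 */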
static void
nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
{
        struct nv50_disp_dmac_object *object =
                container_of(base, typeof(*object), oproxy);
        nvkm_ramht_remove(object->root->ramht, object->hash);
}

static const struct nvkm_oproxy_func
nv50_disp_dmac_child_func_ = {
        .dtor[0] = nv50_disp_dmac_child_del_,
};

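/* Create a DMA object as a child of the channel: wrap it in a proxy so
 * destruction can be intercepted, then bind it into the display RAMHT
 * under the caller's handle.
 */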
static int
nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
                          const struct nvkm_oclass *oclass,
                          void *data, u32 size, struct nvkm_object **pobject)
{
        struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
        struct nv50_disp_root *root = chan->base.root;
        struct nvkm_device *device = root->disp->base.engine.subdev.device;
        const struct nvkm_device_oclass *sclass = oclass->priv;
        struct nv50_disp_dmac_object *object;
        int ret;

        if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
        object->root = root;
        *pobject = &object->oproxy.base;

        ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
        if (ret)
                return ret;

        object->hash = chan->func->bind(chan, object->oproxy.object,
                                              oclass->handle);
        if (object->hash < 0)
                return object->hash;

        return 0;
}

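/* Enumerate the object classes a channel may create as children; only
 * classes provided by the DMA object engine are exposed.
 */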
static int
nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
                          struct nvkm_oclass *sclass)
{
        struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
        struct nv50_disp *disp = chan->base.root->disp;
        struct nvkm_device *device = disp->base.engine.subdev.device;
        const struct nvkm_device_oclass *oclass = NULL;

        sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
        if (sclass->engine && sclass->engine->func->base.sclass) {
                sclass->engine->func->base.sclass(sclass, index, &oclass);
                if (oclass) {
                        sclass->priv = oclass;
                        return 0;
                }
        }

        return -EINVAL;
}

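/* Thin wrappers routing the generic channel hooks to the hardware-specific
 * DMAC implementation supplied at construction time.
 */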
static void
nv50_disp_dmac_fini_(struct nv50_disp_chan *base)
{
        struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
        chan->func->fini(chan);
}

static int
nv50_disp_dmac_init_(struct nv50_disp_chan *base)
{
        struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
        return chan->func->init(chan);
}

static void *
nv50_disp_dmac_dtor_(struct nv50_disp_chan *base)
{
        return nv50_disp_dmac(base);
}

static const struct nv50_disp_chan_func
nv50_disp_dmac_func_ = {
        .dtor = nv50_disp_dmac_dtor_,
        .init = nv50_disp_dmac_init_,
        .fini = nv50_disp_dmac_fini_,
        .child_get = nv50_disp_dmac_child_get_,
        .child_new = nv50_disp_dmac_child_new_,
};

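/* Common constructor for NV50-family DMA display channels.  Looks up the
 * pushbuffer DMA object referenced by 'push', requires it to span exactly
 * one 4KiB page, and encodes its target aperture and start address into
 * chan->push for the init() hook to program into the hardware.
 */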
int
nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
                    const struct nv50_disp_chan_mthd *mthd,
                    struct nv50_disp_root *root, int chid, int head, u64 push,
                    const struct nvkm_oclass *oclass,
                    struct nvkm_object **pobject)
{
        struct nvkm_client *client = oclass->client;
        struct nvkm_dmaobj *dmaobj;
        struct nv50_disp_dmac *chan;
        int ret;

        if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
                return -ENOMEM;
        *pobject = &chan->base.object;
        chan->func = func;

        ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
                                  chid, chid, head, oclass, &chan->base);
        if (ret)
                return ret;

        dmaobj = nvkm_dmaobj_search(client, push);
        if (IS_ERR(dmaobj))
                return PTR_ERR(dmaobj);

        if (dmaobj->limit - dmaobj->start != 0xfff)
                return -EINVAL;

        switch (dmaobj->target) {
        case NV_MEM_TARGET_VRAM:
                chan->push = 0x00000001 | dmaobj->start >> 8;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
                chan->push = 0x00000003 | dmaobj->start >> 8;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

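/* Hash an object's handle into the display RAMHT; the context value
 * encodes the owning user channel id.
 */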
int
nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
                    struct nvkm_object *object, u32 handle)
{
        return nvkm_ramht_insert(chan->base.root->ramht, object,
                                 chan->base.chid.user, -10, handle,
                                 chan->base.chid.user << 28 |
                                 chan->base.chid.user);
}

static void
nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
{
        struct nv50_disp *disp = chan->base.root->disp;
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int ctrl = chan->base.chid.ctrl;
        int user = chan->base.chid.user;

        /* deactivate channel */
        nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
        nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
        if (nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
                        break;
        ) < 0) {
                nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
                           nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
        }

        /* disable error reporting and completion notifications */
        nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
}

static int
nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
{
        struct nv50_disp *disp = chan->base.root->disp;
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int ctrl = chan->base.chid.ctrl;
        int user = chan->base.chid.user;

        /* enable error reporting */
        nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);

        /* initialise channel for dma command submission */
        nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
        nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
        nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
        nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
        nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
        nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);

        /* wait for it to go inactive */
        if (nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
                        break;
        ) < 0) {
                nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
                           nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
                return -EBUSY;
        }

        return 0;
}

const struct nv50_disp_dmac_func
nv50_disp_dmac_func = {
        .init = nv50_disp_dmac_init,
        .fini = nv50_disp_dmac_fini,
        .bind = nv50_disp_dmac_bind,
};