linux/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "channv50.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>
#include <engine/dma.h>

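/*
 * Constructor shared by the NV50-style DMA display channels: allocates the
 * base channel, looks up the user-supplied push buffer, checks that it is at
 * least 0x1000 bytes, and records its memory target and address in
 * chan->push for the init routine below to program into the hardware.
 */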
int
nv50_disp_dmac_new_(const struct nv50_disp_chan_func *func,
                    const struct nv50_disp_chan_mthd *mthd,
                    struct nv50_disp *disp, int chid, int head, u64 push,
                    const struct nvkm_oclass *oclass,
                    struct nvkm_object **pobject)
{
        struct nvkm_client *client = oclass->client;
        struct nv50_disp_chan *chan;
        int ret;

        ret = nv50_disp_chan_new_(func, mthd, disp, chid, chid, head, oclass,
                                  pobject);
        chan = nv50_disp_chan(*pobject);
        if (ret)
                return ret;

        chan->memory = nvkm_umem_search(client, push);
        if (IS_ERR(chan->memory))
                return PTR_ERR(chan->memory);

        if (nvkm_memory_size(chan->memory) < 0x1000)
                return -EINVAL;

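        /* encode the push buffer's memory target in the low bits of
         * chan->push; the buffer address (shifted right by 8) is OR'd
         * in below
         */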
        switch (nvkm_memory_target(chan->memory)) {
        case NVKM_MEM_TARGET_VRAM: chan->push = 0x00000001; break;
        case NVKM_MEM_TARGET_NCOH: chan->push = 0x00000002; break;
        case NVKM_MEM_TARGET_HOST: chan->push = 0x00000003; break;
        default:
                return -EINVAL;
        }

        chan->push |= nvkm_memory_addr(chan->memory) >> 8;
        return 0;
}

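/*
 * Bind a DMA object into the display's RAMHT under the given handle, tagged
 * with the channel's user id, so display methods can reference it by handle.
 */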
int
nv50_disp_dmac_bind(struct nv50_disp_chan *chan,
                    struct nvkm_object *object, u32 handle)
{
        return nvkm_ramht_insert(chan->disp->ramht, object,
                                 chan->chid.user, -10, handle,
                                 chan->chid.user << 28 |
                                 chan->chid.user);
}

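/*
 * Halt the channel: clear its enable bits in the 0x610200 control register
 * and wait for the channel to go idle, logging an error on timeout.
 */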
static void
nv50_disp_dmac_fini(struct nv50_disp_chan *chan)
{
        struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int ctrl = chan->chid.ctrl;
        int user = chan->chid.user;

        /* deactivate channel */
        nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
        nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
        if (nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
                        break;
        ) < 0) {
                nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
                           nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
        }
}

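/*
 * Bring the channel up: program the push buffer value recorded by the
 * constructor, enable DMA command submission, then wait for the channel to
 * go inactive, returning -EBUSY on timeout.
 */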
static int
nv50_disp_dmac_init(struct nv50_disp_chan *chan)
{
        struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int ctrl = chan->chid.ctrl;
        int user = chan->chid.user;

        /* initialise channel for dma command submission */
        nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
        nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
        nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
        nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
        nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
        nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);

        /* wait for it to go inactive */
        if (nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
                        break;
        ) < 0) {
                nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
                           nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
                return -EBUSY;
        }

        return 0;
}

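/*
 * Channel function table used by the NV50-style DMA display channels.
 */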
const struct nv50_disp_chan_func
nv50_disp_dmac_func = {
        .init = nv50_disp_dmac_init,
        .fini = nv50_disp_dmac_fini,
        .intr = nv50_disp_chan_intr,
        .user = nv50_disp_chan_user,
        .bind = nv50_disp_dmac_bind,
};