linux/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
/*
 * Copyright 2013 Ilia Mirkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <engine/xtensa.h>

#include <core/gpuobj.h>
#include <engine/fifo.h>

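/*
 * Enumerate the object classes exposed to userspace.  The per-chip func
 * table supplies a zero-terminated sclass list; return the class at the
 * requested index, or the total class count if the index is out of range.
 */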
static int
nvkm_xtensa_oclass_get(struct nvkm_oclass *oclass, int index)
{
        struct nvkm_xtensa *xtensa = nvkm_xtensa(oclass->engine);
        int c = 0;

        while (xtensa->func->sclass[c].oclass) {
                if (c++ == index) {
                        oclass->base = xtensa->func->sclass[index];
                        return index;
                }
        }

        return c;
}

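/* Back each channel context with a 64KiB instance-memory object. */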
static int
nvkm_xtensa_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
                        int align, struct nvkm_gpuobj **pgpuobj)
{
        return nvkm_gpuobj_new(object->engine->subdev.device, 0x10000, align,
                               true, parent, pgpuobj);
}

static const struct nvkm_object_func
nvkm_xtensa_cclass = {
        .bind = nvkm_xtensa_cclass_bind,
};

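/*
 * Interrupt handler: warn if the watchdog fired (the engine is likely hung),
 * acknowledge the pending interrupts by writing the status back, and, when
 * the (not fully understood) status registers indicate the engine is idle
 * with a channel present, re-enable FIFO_CTRL.
 */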
static void
nvkm_xtensa_intr(struct nvkm_engine *engine)
{
        struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
        struct nvkm_subdev *subdev = &xtensa->engine.subdev;
        struct nvkm_device *device = subdev->device;
        const u32 base = xtensa->addr;
        u32 unk104 = nvkm_rd32(device, base + 0xd04);
        u32 intr = nvkm_rd32(device, base + 0xc20);
        u32 chan = nvkm_rd32(device, base + 0xc28);
        u32 unk10c = nvkm_rd32(device, base + 0xd0c);

        if (intr & 0x10)
                nvkm_warn(subdev, "Watchdog interrupt, engine hung.\n");
        nvkm_wr32(device, base + 0xc20, intr);
        intr = nvkm_rd32(device, base + 0xc20);
        if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
                nvkm_debug(subdev, "Enabling FIFO_CTRL\n");
                nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->func->fifo_val);
        }
}

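/*
 * Stop the engine by masking its interrupts and clearing FIFO_CTRL.  The
 * uploaded firmware image is kept across suspend so it can be reused on
 * resume, and is only released on a full teardown.
 */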
static int
nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
{
        struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
        struct nvkm_device *device = xtensa->engine.subdev.device;
        const u32 base = xtensa->addr;

        nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
        nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */

        if (!suspend)
                nvkm_memory_unref(&xtensa->gpu_fw);
        return 0;
}

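/*
 * Bring the engine up: on first initialisation, fetch the external
 * nouveau/nv84_xuc%03x microcode (keyed by the engine's MMIO base), reject
 * images larger than the 256KiB window, and copy the image into instance
 * memory.  Then point the Xtensa region registers at the firmware, clear
 * any stale interrupts and enable interrupt delivery.
 */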
static int
nvkm_xtensa_init(struct nvkm_engine *engine)
{
        struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
        struct nvkm_subdev *subdev = &xtensa->engine.subdev;
        struct nvkm_device *device = subdev->device;
        const u32 base = xtensa->addr;
        const struct firmware *fw;
        char name[32];
        int i, ret;
        u64 addr, size;
        u32 tmp;

        if (!xtensa->gpu_fw) {
                snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
                         xtensa->addr >> 12);

                ret = request_firmware(&fw, name, device->dev);
                if (ret) {
                        nvkm_warn(subdev, "unable to load firmware %s\n", name);
                        return ret;
                }

                if (fw->size > 0x40000) {
                        nvkm_warn(subdev, "firmware %s too large\n", name);
                        release_firmware(fw);
                        return -EINVAL;
                }

                ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                      0x40000, 0x1000, false,
                                      &xtensa->gpu_fw);
                if (ret) {
                        release_firmware(fw);
                        return ret;
                }

                nvkm_kmap(xtensa->gpu_fw);
                for (i = 0; i < fw->size / 4; i++)
                        nvkm_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
                nvkm_done(xtensa->gpu_fw);
                release_firmware(fw);
        }

        addr = nvkm_memory_addr(xtensa->gpu_fw);
        size = nvkm_memory_size(xtensa->gpu_fw);

        nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */
        nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */

        nvkm_wr32(device, base + 0xd28, xtensa->func->unkd28); /* ?? */
        nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
        nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */

        nvkm_wr32(device, base + 0xcc0, addr >> 8); /* XT_REGION_BASE */
        nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */
        nvkm_wr32(device, base + 0xcc8, size >> 8); /* XT_REGION_LIMIT */

        tmp = nvkm_rd32(device, 0x0);
        nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */

        nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */

        nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
        nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
        return 0;
}

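/* Hand the nvkm_xtensa back to the core, which frees the allocation. */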
static void *
nvkm_xtensa_dtor(struct nvkm_engine *engine)
{
        return nvkm_xtensa(engine);
}

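/* Engine method table common to all Xtensa-based engines. */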
static const struct nvkm_engine_func
nvkm_xtensa = {
        .dtor = nvkm_xtensa_dtor,
        .init = nvkm_xtensa_init,
        .fini = nvkm_xtensa_fini,
        .intr = nvkm_xtensa_intr,
        .fifo.sclass = nvkm_xtensa_oclass_get,
        .cclass = &nvkm_xtensa_cclass,
};

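/*
 * Common constructor for Xtensa-based engines: allocate the wrapper,
 * record the per-chip func table and MMIO base address, and construct
 * the base engine.
 */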
int
nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
                 struct nvkm_device *device, int index, bool enable,
                 u32 addr, struct nvkm_engine **pengine)
{
        struct nvkm_xtensa *xtensa;

        if (!(xtensa = kzalloc(sizeof(*xtensa), GFP_KERNEL)))
                return -ENOMEM;
        xtensa->func = func;
        xtensa->addr = addr;
        *pengine = &xtensa->engine;

        return nvkm_engine_ctor(&nvkm_xtensa, device, index,
                                enable, &xtensa->engine);
}
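
/*
 * Illustrative sketch only, not part of this file: a chip-specific engine
 * is expected to wrap nvkm_xtensa_new_() with its own nvkm_xtensa_func,
 * roughly along the lines below.  The fifo_val/unkd28 values, the class id
 * SOME_CLASS and the 0x103000 base are placeholders, and the sclass entry
 * assumes the usual struct nvkm_sclass initialiser (minver, maxver, oclass).
 *
 *	static const struct nvkm_xtensa_func
 *	gxx_example = {
 *		.fifo_val = 0x1111,      // OR'd into FIFO_CTRL by the intr path
 *		.unkd28   = 0x90044,     // written to the unknown 0xd28 register
 *		.sclass   = {
 *			{ -1, -1, SOME_CLASS },
 *			{}
 *		},
 *	};
 *
 *	int
 *	gxx_example_new(struct nvkm_device *device, int index,
 *			struct nvkm_engine **pengine)
 *	{
 *		return nvkm_xtensa_new_(&gxx_example, device, index,
 *					true, 0x103000, pengine);
 *	}
 */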