/* linux/drivers/gpu/drm/nouveau/nvkm/falcon/base.c */
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/mc.h>
#include <subdev/top.h>

  27void
  28nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
  29                      u32 size, u16 tag, u8 port, bool secure)
  30{
  31        if (secure && !falcon->secret) {
  32                nvkm_warn(falcon->user,
  33                          "writing with secure tag on a non-secure falcon!\n");
  34                return;
  35        }
  36
  37        falcon->func->load_imem(falcon, data, start, size, tag, port,
  38                                secure);
  39}
  40
  41void
  42nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
  43                      u32 size, u8 port)
  44{
  45        mutex_lock(&falcon->dmem_mutex);
  46
  47        falcon->func->load_dmem(falcon, data, start, size, port);
  48
  49        mutex_unlock(&falcon->dmem_mutex);
  50}
  51
  52void
  53nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
  54                      void *data)
  55{
  56        mutex_lock(&falcon->dmem_mutex);
  57
  58        falcon->func->read_dmem(falcon, start, size, port, data);
  59
  60        mutex_unlock(&falcon->dmem_mutex);
  61}
  62
  63void
  64nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
  65{
  66        if (!falcon->func->bind_context) {
  67                nvkm_error(falcon->user,
  68                           "Context binding not supported on this falcon!\n");
  69                return;
  70        }
  71
  72        falcon->func->bind_context(falcon, inst);
  73}
  74
  75void
  76nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
  77{
  78        falcon->func->set_start_addr(falcon, start_addr);
  79}
  80
  81void
  82nvkm_falcon_start(struct nvkm_falcon *falcon)
  83{
  84        falcon->func->start(falcon);
  85}
  86
  87int
  88nvkm_falcon_enable(struct nvkm_falcon *falcon)
  89{
  90        struct nvkm_device *device = falcon->owner->device;
  91        int ret;
  92
  93        nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);
  94        ret = falcon->func->enable(falcon);
  95        if (ret) {
  96                nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
  97                return ret;
  98        }
  99
 100        return 0;
 101}
 102
 103void
 104nvkm_falcon_disable(struct nvkm_falcon *falcon)
 105{
 106        struct nvkm_device *device = falcon->owner->device;
 107
 108        /* already disabled, return or wait_idle will timeout */
 109        if (!nvkm_mc_enabled(device, falcon->owner->type, falcon->owner->inst))
 110                return;
 111
 112        falcon->func->disable(falcon);
 113
 114        nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
 115}
 116
 117int
 118nvkm_falcon_reset(struct nvkm_falcon *falcon)
 119{
 120        nvkm_falcon_disable(falcon);
 121        return nvkm_falcon_enable(falcon);
 122}
 123
 124int
 125nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
 126{
 127        return falcon->func->wait_for_halt(falcon, ms);
 128}
 129
 130int
 131nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
 132{
 133        return falcon->func->clear_interrupt(falcon, mask);
 134}
 135
 136static int
 137nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
 138{
 139        const struct nvkm_falcon_func *func = falcon->func;
 140        const struct nvkm_subdev *subdev = falcon->owner;
 141        u32 reg;
 142
 143        if (!falcon->addr) {
 144                falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
 145                if (WARN_ON(!falcon->addr))
 146                        return -ENODEV;
 147        }
 148
 149        reg = nvkm_falcon_rd32(falcon, 0x12c);
 150        falcon->version = reg & 0xf;
 151        falcon->secret = (reg >> 4) & 0x3;
 152        falcon->code.ports = (reg >> 8) & 0xf;
 153        falcon->data.ports = (reg >> 12) & 0xf;
 154
 155        reg = nvkm_falcon_rd32(falcon, 0x108);
 156        falcon->code.limit = (reg & 0x1ff) << 8;
 157        falcon->data.limit = (reg & 0x3fe00) >> 1;
 158
 159        if (func->debug) {
 160                u32 val = nvkm_falcon_rd32(falcon, func->debug);
 161                falcon->debug = (val >> 20) & 0x1;
 162        }
 163
 164        return 0;
 165}
 166
 167void
 168nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
 169{
 170        if (unlikely(!falcon))
 171                return;
 172
 173        mutex_lock(&falcon->mutex);
 174        if (falcon->user == user) {
 175                nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
 176                falcon->user = NULL;
 177        }
 178        mutex_unlock(&falcon->mutex);
 179}
 180
 181int
 182nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
 183{
 184        int ret = 0;
 185
 186        mutex_lock(&falcon->mutex);
 187        if (falcon->user) {
 188                nvkm_error(user, "%s falcon already acquired by %s!\n",
 189                           falcon->name, falcon->user->name);
 190                mutex_unlock(&falcon->mutex);
 191                return -EBUSY;
 192        }
 193
 194        nvkm_debug(user, "acquired %s falcon\n", falcon->name);
 195        if (!falcon->oneinit)
 196                ret = nvkm_falcon_oneinit(falcon);
 197        falcon->user = user;
 198        mutex_unlock(&falcon->mutex);
 199        return ret;
 200}
 201
 202void
 203nvkm_falcon_dtor(struct nvkm_falcon *falcon)
 204{
 205}
 206
 207int
 208nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
 209                 struct nvkm_subdev *subdev, const char *name, u32 addr,
 210                 struct nvkm_falcon *falcon)
 211{
 212        falcon->func = func;
 213        falcon->owner = subdev;
 214        falcon->name = name;
 215        falcon->addr = addr;
 216        mutex_init(&falcon->mutex);
 217        mutex_init(&falcon->dmem_mutex);
 218        return 0;
 219}
 220
 221void
 222nvkm_falcon_del(struct nvkm_falcon **pfalcon)
 223{
 224        if (*pfalcon) {
 225                nvkm_falcon_dtor(*pfalcon);
 226                kfree(*pfalcon);
 227                *pfalcon = NULL;
 228        }
 229}
 230