/* linux/drivers/gpu/drm/nouveau/nvkm/core/engine.c */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/engine.h>
#include <core/device.h>
#include <core/option.h>

#include <subdev/fb.h>

  30bool
  31nvkm_engine_chsw_load(struct nvkm_engine *engine)
  32{
  33        if (engine->func->chsw_load)
  34                return engine->func->chsw_load(engine);
  35        return false;
  36}
  37
  38void
  39nvkm_engine_unref(struct nvkm_engine **pengine)
  40{
  41        struct nvkm_engine *engine = *pengine;
  42        if (engine) {
  43                mutex_lock(&engine->subdev.mutex);
  44                if (--engine->usecount == 0)
  45                        nvkm_subdev_fini(&engine->subdev, false);
  46                mutex_unlock(&engine->subdev.mutex);
  47                *pengine = NULL;
  48        }
  49}
  50
  51struct nvkm_engine *
  52nvkm_engine_ref(struct nvkm_engine *engine)
  53{
  54        if (engine) {
  55                mutex_lock(&engine->subdev.mutex);
  56                if (++engine->usecount == 1) {
  57                        int ret = nvkm_subdev_init(&engine->subdev);
  58                        if (ret) {
  59                                engine->usecount--;
  60                                mutex_unlock(&engine->subdev.mutex);
  61                                return ERR_PTR(ret);
  62                        }
  63                }
  64                mutex_unlock(&engine->subdev.mutex);
  65        }
  66        return engine;
  67}
  68
  69void
  70nvkm_engine_tile(struct nvkm_engine *engine, int region)
  71{
  72        struct nvkm_fb *fb = engine->subdev.device->fb;
  73        if (engine->func->tile)
  74                engine->func->tile(engine, region, &fb->tile.region[region]);
  75}
  76
  77static void
  78nvkm_engine_intr(struct nvkm_subdev *subdev)
  79{
  80        struct nvkm_engine *engine = nvkm_engine(subdev);
  81        if (engine->func->intr)
  82                engine->func->intr(engine);
  83}
  84
  85static int
  86nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
  87{
  88        struct nvkm_engine *engine = nvkm_engine(subdev);
  89        if (engine->func->info) {
  90                if (!IS_ERR((engine = nvkm_engine_ref(engine)))) {
  91                        int ret = engine->func->info(engine, mthd, data);
  92                        nvkm_engine_unref(&engine);
  93                        return ret;
  94                }
  95                return PTR_ERR(engine);
  96        }
  97        return -ENOSYS;
  98}
  99
 100static int
 101nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
 102{
 103        struct nvkm_engine *engine = nvkm_engine(subdev);
 104        if (engine->func->fini)
 105                return engine->func->fini(engine, suspend);
 106        return 0;
 107}
 108
 109static int
 110nvkm_engine_init(struct nvkm_subdev *subdev)
 111{
 112        struct nvkm_engine *engine = nvkm_engine(subdev);
 113        struct nvkm_fb *fb = subdev->device->fb;
 114        int ret = 0, i;
 115        s64 time;
 116
 117        if (!engine->usecount) {
 118                nvkm_trace(subdev, "init skipped, engine has no users\n");
 119                return ret;
 120        }
 121
 122        if (engine->func->oneinit && !engine->subdev.oneinit) {
 123                nvkm_trace(subdev, "one-time init running...\n");
 124                time = ktime_to_us(ktime_get());
 125                ret = engine->func->oneinit(engine);
 126                if (ret) {
 127                        nvkm_trace(subdev, "one-time init failed, %d\n", ret);
 128                        return ret;
 129                }
 130
 131                engine->subdev.oneinit = true;
 132                time = ktime_to_us(ktime_get()) - time;
 133                nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
 134        }
 135
 136        if (engine->func->init)
 137                ret = engine->func->init(engine);
 138
 139        for (i = 0; fb && i < fb->tile.regions; i++)
 140                nvkm_engine_tile(engine, i);
 141        return ret;
 142}
 143
 144static int
 145nvkm_engine_preinit(struct nvkm_subdev *subdev)
 146{
 147        struct nvkm_engine *engine = nvkm_engine(subdev);
 148        if (engine->func->preinit)
 149                engine->func->preinit(engine);
 150        return 0;
 151}
 152
 153static void *
 154nvkm_engine_dtor(struct nvkm_subdev *subdev)
 155{
 156        struct nvkm_engine *engine = nvkm_engine(subdev);
 157        if (engine->func->dtor)
 158                return engine->func->dtor(engine);
 159        return engine;
 160}
 161
 162static const struct nvkm_subdev_func
 163nvkm_engine_func = {
 164        .dtor = nvkm_engine_dtor,
 165        .preinit = nvkm_engine_preinit,
 166        .init = nvkm_engine_init,
 167        .fini = nvkm_engine_fini,
 168        .info = nvkm_engine_info,
 169        .intr = nvkm_engine_intr,
 170};
 171
 172int
 173nvkm_engine_ctor(const struct nvkm_engine_func *func,
 174                 struct nvkm_device *device, int index, bool enable,
 175                 struct nvkm_engine *engine)
 176{
 177        nvkm_subdev_ctor(&nvkm_engine_func, device, index, &engine->subdev);
 178        engine->func = func;
 179
 180        if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
 181                nvkm_debug(&engine->subdev, "disabled\n");
 182                return -ENODEV;
 183        }
 184
 185        spin_lock_init(&engine->lock);
 186        return 0;
 187}
 188
 189int
 190nvkm_engine_new_(const struct nvkm_engine_func *func,
 191                 struct nvkm_device *device, int index, bool enable,
 192                 struct nvkm_engine **pengine)
 193{
 194        if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
 195                return -ENOMEM;
 196        return nvkm_engine_ctor(func, device, index, enable, *pengine);
 197}
 198