linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

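/* Proxy for an object created on a channel; remembers the owning channel
 * and the handle returned by the channel's object_ctor hook.
 */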
struct nvkm_fifo_chan_object {
        struct nvkm_oproxy oproxy;
        struct nvkm_fifo_chan *chan;
        int hash;
};

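/* Object fini: drop the engine's use count on this channel and, when the
 * last user goes away, detach the engine from the channel and finalize the
 * shared engine context object.
 */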
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
        struct nvkm_fifo_chan_object *object =
                container_of(base, typeof(*object), oproxy);
        struct nvkm_engine *engine  = object->oproxy.object->engine;
        struct nvkm_fifo_chan *chan = object->chan;
        struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
        const char *name = nvkm_subdev_name[engine->subdev.index];
        int ret = 0;

        if (--engn->usecount)
                return 0;

        if (chan->func->engine_fini) {
                ret = chan->func->engine_fini(chan, engine, suspend);
                if (ret) {
                        nvif_error(&chan->object,
                                   "detach %s failed, %d\n", name, ret);
                        return ret;
                }
        }

        if (engn->object) {
                ret = nvkm_object_fini(engn->object, suspend);
                if (ret && suspend)
                        return ret;
        }

        nvif_trace(&chan->object, "detached %s\n", name);
        return ret;
}

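/* Object init: on the first user of an engine, reinitialize the shared
 * engine context object and attach the engine to the channel.
 */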
static int
nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
{
        struct nvkm_fifo_chan_object *object =
                container_of(base, typeof(*object), oproxy);
        struct nvkm_engine *engine  = object->oproxy.object->engine;
        struct nvkm_fifo_chan *chan = object->chan;
        struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
        const char *name = nvkm_subdev_name[engine->subdev.index];
        int ret;

        if (engn->usecount++)
                return 0;

        if (engn->object) {
                ret = nvkm_object_init(engn->object);
                if (ret)
                        return ret;
        }

        if (chan->func->engine_init) {
                ret = chan->func->engine_init(chan, engine);
                if (ret) {
                        nvif_error(&chan->object,
                                   "attach %s failed, %d\n", name, ret);
                        return ret;
                }
        }

        nvif_trace(&chan->object, "attached %s\n", name);
        return 0;
}

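/* Object dtor: remove the object from the channel's object hash and, once
 * the last reference to the engine context is dropped, destroy the context
 * and release the VMM engine reference taken at creation time.
 */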
static void
nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
{
        struct nvkm_fifo_chan_object *object =
                container_of(base, typeof(*object), oproxy);
        struct nvkm_engine *engine  = object->oproxy.base.engine;
        struct nvkm_fifo_chan *chan = object->chan;
        struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];

        if (chan->func->object_dtor)
                chan->func->object_dtor(chan, object->hash);

        if (!--engn->refcount) {
                if (chan->func->engine_dtor)
                        chan->func->engine_dtor(chan, engine);
                nvkm_object_del(&engn->object);
                if (chan->vmm)
                        atomic_dec(&chan->vmm->engref[engine->subdev.index]);
        }
}

static const struct nvkm_oproxy_func
nvkm_fifo_chan_child_func = {
        .dtor[0] = nvkm_fifo_chan_child_del,
        .init[0] = nvkm_fifo_chan_child_init,
        .fini[0] = nvkm_fifo_chan_child_fini,
};

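/* Construct an object on the channel.  The first object for a given engine
 * also creates that engine's channel context; the new object is then built
 * with the context (when one exists) as its parent, and registered with the
 * channel's object hash.
 */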
static int
nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
                         struct nvkm_object **pobject)
{
        struct nvkm_engine *engine = oclass->engine;
        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
        struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
        struct nvkm_fifo_chan_object *object;
        int ret = 0;

        if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
        object->chan = chan;
        *pobject = &object->oproxy.base;

        if (!engn->refcount++) {
                struct nvkm_oclass cclass = {
                        .client = oclass->client,
                        .engine = oclass->engine,
                };

                if (chan->vmm)
                        atomic_inc(&chan->vmm->engref[engine->subdev.index]);

                if (engine->func->fifo.cclass) {
                        ret = engine->func->fifo.cclass(chan, &cclass,
                                                        &engn->object);
                } else
                if (engine->func->cclass) {
                        ret = nvkm_object_new_(engine->func->cclass, &cclass,
                                               NULL, 0, &engn->object);
                }
                if (ret)
                        return ret;

                if (chan->func->engine_ctor) {
                        ret = chan->func->engine_ctor(chan, oclass->engine,
                                                      engn->object);
                        if (ret)
                                return ret;
                }
        }

        ret = oclass->base.ctor(&(const struct nvkm_oclass) {
                                        .base = oclass->base,
                                        .engn = oclass->engn,
                                        .handle = oclass->handle,
                                        .object = oclass->object,
                                        .client = oclass->client,
                                        .parent = engn->object ?
                                                  engn->object :
                                                  oclass->parent,
                                        .engine = engine,
                                }, data, size, &object->oproxy.object);
        if (ret)
                return ret;

        if (chan->func->object_ctor) {
                object->hash =
                        chan->func->object_ctor(chan, object->oproxy.object);
                if (object->hash < 0)
                        return object->hash;
        }

        return 0;
}

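/* Enumerate the object classes available on this channel by walking every
 * engine the channel was created with, using either the engine's
 * fifo.sclass hook or its static sclass list.
 */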
static int
nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
                         struct nvkm_oclass *oclass)
{
        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
        struct nvkm_fifo *fifo = chan->fifo;
        struct nvkm_device *device = fifo->engine.subdev.device;
        struct nvkm_engine *engine;
        u64 mask = chan->engines;
        int ret, i, c;

        for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
                if (!(engine = nvkm_device_engine(device, i)))
                        continue;
                oclass->engine = engine;
                oclass->base.oclass = 0;

                if (engine->func->fifo.sclass) {
                        ret = engine->func->fifo.sclass(oclass, index);
                        if (oclass->base.oclass) {
                                if (!oclass->base.ctor)
                                        oclass->base.ctor = nvkm_object_new;
                                oclass->ctor = nvkm_fifo_chan_child_new;
                                return 0;
                        }

                        index -= ret;
                        continue;
                }

                while (engine->func->sclass[c].oclass) {
                        if (c++ == index) {
                                oclass->base = engine->func->sclass[index];
                                if (!oclass->base.ctor)
                                        oclass->base.ctor = nvkm_object_new;
                                oclass->ctor = nvkm_fifo_chan_child_new;
                                return 0;
                        }
                }
                index -= c;
        }

        return -EINVAL;
}

static int
nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
                    struct nvkm_event **pevent)
{
        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
        if (chan->func->ntfy)
                return chan->func->ntfy(chan, type, pevent);
        return -ENODEV;
}

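/* Map the channel's user (control) registers into the client. */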
static int
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
                   enum nvkm_object_map *type, u64 *addr, u64 *size)
{
        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
        *type = NVKM_OBJECT_MAP_IO;
        *addr = chan->addr;
        *size = chan->size;
        return 0;
}

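/* rd32/wr32 access the channel's user registers from the kernel, mapping
 * them on first use.
 */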
static int
nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
        if (unlikely(!chan->user)) {
                chan->user = ioremap(chan->addr, chan->size);
                if (!chan->user)
                        return -ENOMEM;
        }
        if (unlikely(addr + 4 > chan->size))
                return -EINVAL;
        *data = ioread32_native(chan->user + addr);
        return 0;
}

static int
nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
        if (unlikely(!chan->user)) {
                chan->user = ioremap(chan->addr, chan->size);
                if (!chan->user)
                        return -ENOMEM;
        }
        if (unlikely(addr + 4 > chan->size))
                return -EINVAL;
        iowrite32_native(data, chan->user + addr);
        return 0;
}

static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
        chan->func->fini(chan);
        return 0;
}

static int
nvkm_fifo_chan_init(struct nvkm_object *object)
{
        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
        chan->func->init(chan);
        return 0;
}

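/* Destroy the channel: release its channel id, unmap the user registers,
 * split the VMM from the instance block, and free the push buffer ctxdma
 * and instance memory.
 */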
static void *
nvkm_fifo_chan_dtor(struct nvkm_object *object)
{
        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
        struct nvkm_fifo *fifo = chan->fifo;
        void *data = chan->func->dtor(chan);
        unsigned long flags;

        spin_lock_irqsave(&fifo->lock, flags);
        if (!list_empty(&chan->head)) {
                __clear_bit(chan->chid, fifo->mask);
                list_del(&chan->head);
        }
        spin_unlock_irqrestore(&fifo->lock, flags);

        if (chan->user)
                iounmap(chan->user);

        if (chan->vmm) {
                nvkm_vmm_part(chan->vmm, chan->inst->memory);
                nvkm_vmm_unref(&chan->vmm);
        }

        nvkm_gpuobj_del(&chan->push);
        nvkm_gpuobj_del(&chan->inst);
        return data;
}

static const struct nvkm_object_func
nvkm_fifo_chan_func = {
        .dtor = nvkm_fifo_chan_dtor,
        .init = nvkm_fifo_chan_init,
        .fini = nvkm_fifo_chan_fini,
        .ntfy = nvkm_fifo_chan_ntfy,
        .map = nvkm_fifo_chan_map,
        .rd32 = nvkm_fifo_chan_rd32,
        .wr32 = nvkm_fifo_chan_wr32,
        .sclass = nvkm_fifo_chan_child_get,
};

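/* Common channel constructor: allocates the instance block, binds the push
 * buffer ctxdma, joins the requested address space, allocates a channel id,
 * and records where the channel's user registers live.
 */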
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
                    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
                    u64 hvmm, u64 push, u64 engines, int bar, u32 base,
                    u32 user, const struct nvkm_oclass *oclass,
                    struct nvkm_fifo_chan *chan)
{
        struct nvkm_client *client = oclass->client;
        struct nvkm_device *device = fifo->engine.subdev.device;
        struct nvkm_dmaobj *dmaobj;
        unsigned long flags;
        int ret;

        nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
        chan->func = func;
        chan->fifo = fifo;
        chan->engines = engines;
        INIT_LIST_HEAD(&chan->head);

        /* instance memory */
        ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
        if (ret)
                return ret;

        /* allocate push buffer ctxdma instance */
        if (push) {
                dmaobj = nvkm_dmaobj_search(client, push);
                if (IS_ERR(dmaobj))
                        return PTR_ERR(dmaobj);

                ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
                                       &chan->push);
                if (ret)
                        return ret;
        }

        /* channel address space */
        if (hvmm) {
                struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
                if (IS_ERR(vmm))
                        return PTR_ERR(vmm);

                if (vmm->mmu != device->mmu)
                        return -EINVAL;

                ret = nvkm_vmm_join(vmm, chan->inst->memory);
                if (ret)
                        return ret;

                chan->vmm = nvkm_vmm_ref(vmm);
        }

        /* allocate channel id */
        spin_lock_irqsave(&fifo->lock, flags);
        chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
        if (chan->chid >= NVKM_FIFO_CHID_NR) {
                spin_unlock_irqrestore(&fifo->lock, flags);
                return -ENOSPC;
        }
        list_add(&chan->head, &fifo->chan);
        __set_bit(chan->chid, fifo->mask);
        spin_unlock_irqrestore(&fifo->lock, flags);

        /* determine address of this channel's user registers */
        chan->addr = device->func->resource_addr(device, bar) +
                     base + user * chan->chid;
        chan->size = user;

        nvkm_fifo_cevent(fifo);
        return 0;
}