linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <engine/fifo.h>

#include <core/client.h>
#include <core/device.h>
#include <core/handle.h>
#include <core/notify.h>
#include <engine/dmaobj.h>

#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

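/*
 * Constructor for notifications on the FIFO's channel event (cevent,
 * signalled whenever a channel is created below).  Only zero-length
 * requests are accepted; anything else is rejected with -ENOSYS.
 */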
static int
nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
                     struct nvkm_notify *notify)
{
        if (size == 0) {
                notify->size  = 0;
                notify->types = 1;
                notify->index = 0;
                return 0;
        }
        return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fifo_event_func = {
        .ctor = nvkm_fifo_event_ctor,
};

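/*
 * Base constructor for FIFO channel objects.  Validates and binds the
 * DMA object describing the push buffer, reserves a free channel ID
 * under the FIFO lock, records the location of the channel's control
 * window within the given BAR, and signals the channel event (cevent).
 */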
int
nvkm_fifo_channel_create_(struct nvkm_object *parent,
                          struct nvkm_object *engine,
                          struct nvkm_oclass *oclass,
                          int bar, u32 addr, u32 size, u32 pushbuf,
                          u64 engmask, int len, void **ptr)
{
        struct nvkm_device *device = nv_device(engine);
        struct nvkm_fifo *priv = (void *)engine;
        struct nvkm_fifo_chan *chan;
        struct nvkm_dmaeng *dmaeng;
        unsigned long flags;
        int ret;

        /* create base object class */
        ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
                                  engmask, len, ptr);
        chan = *ptr;
        if (ret)
                return ret;

        /* validate dma object representing push buffer */
        chan->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
        if (!chan->pushdma)
                return -ENOENT;

        dmaeng = (void *)chan->pushdma->base.engine;
        switch (chan->pushdma->base.oclass->handle) {
        case NV_DMA_FROM_MEMORY:
        case NV_DMA_IN_MEMORY:
                break;
        default:
                return -EINVAL;
        }

        ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu);
        if (ret)
                return ret;

        /* find a free fifo channel */
        spin_lock_irqsave(&priv->lock, flags);
        for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
                if (!priv->channel[chan->chid]) {
                        priv->channel[chan->chid] = nv_object(chan);
                        break;
                }
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        if (chan->chid == priv->max) {
                nv_error(priv, "no free channels\n");
                return -ENOSPC;
        }

        chan->addr = nv_device_resource_start(device, bar) +
                     addr + size * chan->chid;
        chan->size = size;
        nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
        return 0;
}

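/*
 * Base destructor for FIFO channel objects: unmaps the control window
 * (if it was ever mapped), releases the channel ID under the FIFO lock,
 * and drops the push buffer references.
 */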
void
nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
{
        struct nvkm_fifo *priv = (void *)nv_object(chan)->engine;
        unsigned long flags;

        if (chan->user)
                iounmap(chan->user);

        spin_lock_irqsave(&priv->lock, flags);
        priv->channel[chan->chid] = NULL;
        spin_unlock_irqrestore(&priv->lock, flags);

        nvkm_gpuobj_ref(NULL, &chan->pushgpu);
        nvkm_object_ref(NULL, (struct nvkm_object **)&chan->pushdma);
        nvkm_namedb_destroy(&chan->namedb);
}

void
_nvkm_fifo_channel_dtor(struct nvkm_object *object)
{
        struct nvkm_fifo_chan *chan = (void *)object;
        nvkm_fifo_channel_destroy(chan);
}

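/*
 * Default map() method: report the physical address and size of the
 * channel's control window.
 */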
int
_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
        struct nvkm_fifo_chan *chan = (void *)object;
        *addr = chan->addr;
        *size = chan->size;
        return 0;
}

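/*
 * Default register accessors for the channel control window.  The
 * window is ioremap()ed lazily on first access; both accessors bail
 * out (warning once) if the mapping fails.
 */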
u32
_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
{
        struct nvkm_fifo_chan *chan = (void *)object;
        if (unlikely(!chan->user)) {
                chan->user = ioremap(chan->addr, chan->size);
                if (WARN_ON_ONCE(chan->user == NULL))
                        return 0;
        }
        return ioread32_native(chan->user + addr);
}

void
_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
        struct nvkm_fifo_chan *chan = (void *)object;
        if (unlikely(!chan->user)) {
                chan->user = ioremap(chan->addr, chan->size);
                if (WARN_ON_ONCE(chan->user == NULL))
                        return;
        }
        iowrite32_native(data, chan->user + addr);
}

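/*
 * Constructor for uevent notifications.  Only the empty, unversioned
 * request is accepted.  Note that nvif_unvers() expands to code that
 * assigns the unpack result to the local 'ret' (see <nvif/unpack.h>),
 * so 'ret' is initialised before being returned despite appearances.
 */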
int
nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
                      struct nvkm_notify *notify)
{
        union {
                struct nvif_notify_uevent_req none;
        } *req = data;
        int ret;

        if (nvif_unvers(req->none)) {
                notify->size  = sizeof(struct nvif_notify_uevent_rep);
                notify->types = 1;
                notify->index = 0;
        }

        return ret;
}

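/* Broadcast an (empty) uevent reply to any listeners on fifo->uevent. */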
void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
        struct nvif_notify_uevent_rep rep = {
        };
        nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
}

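/*
 * Default ntfy() method: the uevent is only exposed for channel classes
 * of G82_CHANNEL_DMA and newer.
 */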
int
_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
                        struct nvkm_event **event)
{
        struct nvkm_fifo *fifo = (void *)object->engine;
        switch (type) {
        case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
                if (nv_mclass(object) >= G82_CHANNEL_DMA) {
                        *event = &fifo->uevent;
                        return 0;
                }
                break;
        default:
                break;
        }
        return -EINVAL;
}

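/*
 * Resolve the channel ID owning an arbitrary object by walking up its
 * parent chain; returns -1 if no channel belonging to this FIFO engine
 * is found.
 */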
static int
nvkm_fifo_chid(struct nvkm_fifo *priv, struct nvkm_object *object)
{
        int engidx = nv_hclass(priv) & 0xff;

        while (object && object->parent) {
                if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
                    (nv_hclass(object->parent) & 0xff) == engidx)
                        return nvkm_fifo_chan(object)->chid;
                object = object->parent;
        }

        return -1;
}

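/*
 * Look up the channel currently occupying 'chid' (under the FIFO lock)
 * and return the name of the client that owns it.
 */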
const char *
nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
{
        struct nvkm_fifo_chan *chan = NULL;
        unsigned long flags;

        spin_lock_irqsave(&fifo->lock, flags);
        if (chid >= fifo->min && chid <= fifo->max)
                chan = (void *)fifo->channel[chid];
        spin_unlock_irqrestore(&fifo->lock, flags);

        return nvkm_client_name(chan);
}

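/* Base destructor: free the channel table and tear down both event sources. */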
void
nvkm_fifo_destroy(struct nvkm_fifo *priv)
{
        kfree(priv->channel);
        nvkm_event_fini(&priv->uevent);
        nvkm_event_fini(&priv->cevent);
        nvkm_engine_destroy(&priv->base);
}

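/*
 * Base constructor for FIFO engine objects.  'min' and 'max' bound the
 * channel ID range; the channel lookup table is sized for max + 1
 * entries and the channel event (cevent) is initialised here, while
 * the uevent source is expected to be set up by the chipset-specific
 * constructor.
 */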
int
nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
                  struct nvkm_oclass *oclass,
                  int min, int max, int length, void **pobject)
{
        struct nvkm_fifo *priv;
        int ret;

        ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
                                  "fifo", length, pobject);
        priv = *pobject;
        if (ret)
                return ret;

        priv->min = min;
        priv->max = max;
        priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
        if (!priv->channel)
                return -ENOMEM;

        ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &priv->cevent);
        if (ret)
                return ret;

        priv->chid = nvkm_fifo_chid;
        spin_lock_init(&priv->lock);
        return 0;
}