/* linux/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/client.h>
#include <engine/fifo.h>

#include <nvif/class.h>

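/* Find the DMA object registered against @object in @client's red-black
 * tree of DMA objects, or return NULL if no such handle exists.
 */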
struct nvkm_dmaobj *
nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object)
{
	struct rb_node *node = client->dmaroot.rb_node;
	while (node) {
		struct nvkm_dmaobj *dmaobj =
			container_of(node, typeof(*dmaobj), rb);
		if (object < dmaobj->handle)
			node = node->rb_left;
		else
		if (object > dmaobj->handle)
			node = node->rb_right;
		else
			return dmaobj;
	}
	return NULL;
}

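/* Construct a chipset-specific DMA object via the implementation's
 * class_new() hook and track it in the owning client's red-black tree,
 * keyed by object handle.  A duplicate handle fails with -EEXIST.
 */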
static int
nvkm_dma_oclass_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nvkm_dma *dma = nvkm_dma(oclass->engine);
	struct nvkm_dmaobj *dmaobj = NULL;
	struct nvkm_client *client = oclass->client;
	struct rb_node **ptr = &client->dmaroot.rb_node;
	struct rb_node *parent = NULL;
	int ret;

	ret = dma->func->class_new(dma, oclass, data, size, &dmaobj);
	if (dmaobj)
		*pobject = &dmaobj->object;
	if (ret)
		return ret;

	dmaobj->handle = oclass->object;

	while (*ptr) {
		struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
		parent = *ptr;
		if (dmaobj->handle < obj->handle)
			ptr = &parent->rb_left;
		else
		if (dmaobj->handle > obj->handle)
			ptr = &parent->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&dmaobj->rb, parent, ptr);
	rb_insert_color(&dmaobj->rb, &client->dmaroot);
	return 0;
}

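/* Constructor used when DMA classes are created directly on the device. */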
static const struct nvkm_device_oclass
nvkm_dma_oclass_base = {
	.ctor = nvkm_dma_oclass_new,
};

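/* Constructor used when DMA classes are created via the FIFO engine;
 * forwards to the common constructor above.
 */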
static int
nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	return nvkm_dma_oclass_new(oclass->engine->subdev.device,
				   oclass, data, size, pobject);
}

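/* DMA object classes exposed by this engine. */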
static const struct nvkm_sclass
nvkm_dma_sclass[] = {
	{ 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
	{ 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
	{ 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
};

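/* Enumerate the DMA classes available directly on the device. */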
static int
nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
			 const struct nvkm_device_oclass **class)
{
	const int count = ARRAY_SIZE(nvkm_dma_sclass);
	if (index < count) {
		const struct nvkm_sclass *oclass = &nvkm_dma_sclass[index];
		sclass->base = oclass[0];
		sclass->engn = oclass;
		*class = &nvkm_dma_oclass_base;
		return index;
	}
	return count;
}

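/* Enumerate the DMA classes available via the FIFO engine. */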
static int
nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index)
{
	const int count = ARRAY_SIZE(nvkm_dma_sclass);
	if (index < count) {
		oclass->base = nvkm_dma_sclass[index];
		return index;
	}
	return count;
}

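/* No engine-specific teardown; just hand the allocation back to the core
 * to be freed.
 */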
static void *
nvkm_dma_dtor(struct nvkm_engine *engine)
{
	return nvkm_dma(engine);
}

static const struct nvkm_engine_func
nvkm_dma = {
	.dtor = nvkm_dma_dtor,
	.base.sclass = nvkm_dma_oclass_base_get,
	.fifo.sclass = nvkm_dma_oclass_fifo_get,
};

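/* Common constructor: allocate the engine, attach the chipset-specific
 * function table, and initialise the base engine state.
 */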
int
nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
	      int index, struct nvkm_dma **pdma)
{
	struct nvkm_dma *dma;

	if (!(dma = *pdma = kzalloc(sizeof(*dma), GFP_KERNEL)))
		return -ENOMEM;
	dma->func = func;

	return nvkm_engine_ctor(&nvkm_dma, device, index, true, &dma->engine);
}