linux/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/gpuobj.h>
#include <core/engine.h>

#include <subdev/instmem.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>

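/*
 * Overview: a gpuobj is a chunk of GPU-accessible memory, either allocated
 * directly from instmem or sub-allocated from the heap of a parent gpuobj.
 * CPU access is bracketed by nvkm_kmap()/nvkm_done(): acquire installs
 * either the "fast" accessors (direct reads/writes through a CPU mapping)
 * or the "slow" accessors (indirect via the backing memory or parent), and
 * release restores the acquire-only function table.
 */
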
/* fast-path, where backend is able to provide direct pointer to memory */
static u32
nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
{
	return ioread32_native(gpuobj->map + offset);
}

static void
nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
	iowrite32_native(data, gpuobj->map + offset);
}

/* accessor functions for gpuobjs allocated directly from instmem */
static u32
nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
	return nvkm_ro32(gpuobj->memory, offset);
}

static void
nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
	nvkm_wo32(gpuobj->memory, offset, data);
}

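/* release: restore the acquire-only function table and end the kmap */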
static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
static void
nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->func = &nvkm_gpuobj_heap;
	nvkm_done(gpuobj->memory);
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap_fast = {
	.release = nvkm_gpuobj_heap_release,
	.rd32 = nvkm_gpuobj_rd32_fast,
	.wr32 = nvkm_gpuobj_wr32_fast,
};

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap_slow = {
	.release = nvkm_gpuobj_heap_release,
	.rd32 = nvkm_gpuobj_heap_rd32,
	.wr32 = nvkm_gpuobj_heap_wr32,
};

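/*
 * acquire: try for a direct CPU mapping of the backing memory and use the
 * fast accessors; fall back to the indirect instmem accessors otherwise.
 */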
static void *
nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->map = nvkm_kmap(gpuobj->memory);
	if (likely(gpuobj->map))
		gpuobj->func = &nvkm_gpuobj_heap_fast;
	else
		gpuobj->func = &nvkm_gpuobj_heap_slow;
	return gpuobj->map;
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap = {
	.acquire = nvkm_gpuobj_heap_acquire,
};

/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
static u32
nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
	return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
}

static void
nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
	nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
}

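/*
 * Same acquire/release scheme as above, but for sub-allocated gpuobjs the
 * slow path forwards accesses to the parent at node->offset, and acquire
 * offsets the parent's CPU mapping by the same amount.
 */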
static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
static void
nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->func = &nvkm_gpuobj_func;
	nvkm_done(gpuobj->parent);
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_fast = {
	.release = nvkm_gpuobj_release,
	.rd32 = nvkm_gpuobj_rd32_fast,
	.wr32 = nvkm_gpuobj_wr32_fast,
};

static const struct nvkm_gpuobj_func
nvkm_gpuobj_slow = {
	.release = nvkm_gpuobj_release,
	.rd32 = nvkm_gpuobj_rd32,
	.wr32 = nvkm_gpuobj_wr32,
};

static void *
nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->map = nvkm_kmap(gpuobj->parent);
	if (likely(gpuobj->map)) {
		gpuobj->map  = (u8 *)gpuobj->map + gpuobj->node->offset;
		gpuobj->func = &nvkm_gpuobj_fast;
	} else {
		gpuobj->func = &nvkm_gpuobj_slow;
	}
	return gpuobj->map;
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_func = {
	.acquire = nvkm_gpuobj_acquire,
};

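/*
 * Common constructor.  With a parent, carve the object out of the parent's
 * heap: a non-negative align allocates from the bottom of the heap, while
 * a negative align allocates from the top with an alignment of -align.
 * Without a parent, allocate fresh backing memory from instmem.  Either
 * way, a heap is initialised so the new object can itself be sub-allocated
 * from.
 */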
static int
nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
		 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
{
	u32 offset;
	int ret;

	if (parent) {
		if (align >= 0) {
			ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
					   max(align, 1), &gpuobj->node);
		} else {
			ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
					   -align, &gpuobj->node);
		}
		if (ret)
			return ret;

		gpuobj->parent = parent;
		gpuobj->func = &nvkm_gpuobj_func;
		gpuobj->addr = parent->addr + gpuobj->node->offset;
		gpuobj->size = gpuobj->node->length;

		if (zero) {
			nvkm_kmap(gpuobj);
			for (offset = 0; offset < gpuobj->size; offset += 4)
				nvkm_wo32(gpuobj, offset, 0x00000000);
			nvkm_done(gpuobj);
		}
	} else {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
				      abs(align), zero, &gpuobj->memory);
		if (ret)
			return ret;

		gpuobj->func = &nvkm_gpuobj_heap;
		gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
		gpuobj->size = nvkm_memory_size(gpuobj->memory);
	}

	return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
}

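/*
 * Destroy a gpuobj: return any sub-allocation to the parent's heap, tear
 * down our own heap and backing memory, then free and clear the pointer.
 */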
void
nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_gpuobj *gpuobj = *pgpuobj;
	if (gpuobj) {
		if (gpuobj->parent)
			nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
		nvkm_mm_fini(&gpuobj->heap);
		nvkm_memory_del(&gpuobj->memory);
		kfree(*pgpuobj);
		*pgpuobj = NULL;
	}
}

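/*
 * Allocate and construct a gpuobj.  A minimal usage sketch (the size,
 * alignment and error handling below are illustrative only):
 *
 *	struct nvkm_gpuobj *obj;
 *	int ret = nvkm_gpuobj_new(device, 0x1000, 16, true, NULL, &obj);
 *
 *	if (ret == 0) {
 *		nvkm_kmap(obj);
 *		nvkm_wo32(obj, 0x00, 0x00000000);
 *		nvkm_done(obj);
 *		nvkm_gpuobj_del(&obj);
 *	}
 */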
int
nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
		struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_gpuobj *gpuobj;
	int ret;

	if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
		return -ENOMEM;

	ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
	if (ret)
		nvkm_gpuobj_del(pgpuobj);
	return ret;
}

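/*
 * Map the object's backing memory into a VM: allocate a VMA of
 * gpuobj->size with a 12-bit (4KiB) page shift and the requested access
 * rights, then map the memory into it at offset 0.
 */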
int
nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
		u32 access, struct nvkm_vma *vma)
{
	struct nvkm_memory *memory = gpuobj->memory;
	int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
	if (ret == 0)
		nvkm_memory_map(memory, vma, 0);
	return ret;
}

void
nvkm_gpuobj_unmap(struct nvkm_vma *vma)
{
	if (vma->node) {
		nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
	}
}

/* the below is basically only here to support sharing the paged dma object
 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
 * anywhere else.
 */

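/*
 * Wrap pre-existing memory in a bare gpuobj; only ->addr and ->size are
 * filled in, and the wrapper does not take ownership of the memory.
 */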
int
nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
{
	if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
		return -ENOMEM;

	(*pgpuobj)->addr = nvkm_memory_addr(memory);
	(*pgpuobj)->size = nvkm_memory_size(memory);
	return 0;
}

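/*
 * Bulk copy helpers.  Offsets and lengths are in bytes, but transfers are
 * done one 32-bit word at a time, and the gpuobj is expected to already be
 * kmapped by the caller.
 */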
void
nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
		      u32 length)
{
	int i;

	for (i = 0; i < length; i += 4)
		nvkm_wo32(dst, dstoffset + i, *(u32 *)(src + i));
}

void
nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
			u32 length)
{
	int i;

	for (i = 0; i < length; i += 4)
		((u32 *)dst)[i / 4] = nvkm_ro32(src, srcoffset + i);
}