linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
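Userspace-facing memory object ("umem") implementation for the NVKM MMU subdevice (description inferred from the file path and the code below).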
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>
#include <subdev/bar.h>

#include <nvif/class.h>
#include <nvif/if000a.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_umem;
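/*
 * Resolve a userspace memory handle to its backing nvkm_memory.  The normal
 * path is a direct object lookup; if that fails and the caller is a
 * privileged (super) client distinct from its master, the master client's
 * umem list is searched instead.  On success a new reference to the memory
 * is returned, otherwise ERR_PTR(-ENOENT).
 */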
struct nvkm_memory *
nvkm_umem_search(struct nvkm_client *client, u64 handle)
{
        struct nvkm_client *master = client->object.client;
        struct nvkm_memory *memory = NULL;
        struct nvkm_object *object;
        struct nvkm_umem *umem;

        object = nvkm_object_search(client, handle, &nvkm_umem);
        if (IS_ERR(object)) {
                if (client->super && client != master) {
                        spin_lock(&master->lock);
                        list_for_each_entry(umem, &master->umem, head) {
                                if (umem->object.object == handle) {
                                        memory = nvkm_memory_ref(umem->memory);
                                        break;
                                }
                        }
                        spin_unlock(&master->lock);
                }
        } else {
                umem = nvkm_umem(object);
                if (!umem->priv || client->super)
                        memory = nvkm_memory_ref(umem->memory);
        }

        return memory ? memory : ERR_PTR(-ENOENT);
}

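/*
 * Tear down the CPU mapping created by nvkm_umem_map().  IO mappings release
 * the BAR1 VMM allocation (if one was made); host mappings are vunmap()d.
 * Returns -EEXIST if no mapping is currently held.
 */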
static int
nvkm_umem_unmap(struct nvkm_object *object)
{
        struct nvkm_umem *umem = nvkm_umem(object);

        if (!umem->map)
                return -EEXIST;

        if (umem->io) {
                if (!IS_ERR(umem->bar)) {
                        struct nvkm_device *device = umem->mmu->subdev.device;
                        nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &umem->bar);
                } else {
                        umem->bar = NULL;
                }
        } else {
                vunmap(umem->map);
                umem->map = NULL;
        }

        return 0;
}

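/*
 * Map the memory for CPU access.  Host (system) memory with no extra map
 * arguments is mapped into kernel virtual address space and reported as a
 * VA mapping; VRAM or kind-tagged memory is mapped through BAR by the MMU
 * backend's mem.umap() hook and reported as an IO mapping.
 */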
static int
nvkm_umem_map(struct nvkm_object *object, void *argv, u32 argc,
              enum nvkm_object_map *type, u64 *handle, u64 *length)
{
        struct nvkm_umem *umem = nvkm_umem(object);
        struct nvkm_mmu *mmu = umem->mmu;

        if (!umem->mappable)
                return -EINVAL;
        if (umem->map)
                return -EEXIST;

        if ((umem->type & NVKM_MEM_HOST) && !argc) {
                int ret = nvkm_mem_map_host(umem->memory, &umem->map);
                if (ret)
                        return ret;

                *handle = (unsigned long)(void *)umem->map;
                *length = nvkm_memory_size(umem->memory);
                *type = NVKM_OBJECT_MAP_VA;
                return 0;
        } else
        if ((umem->type & NVKM_MEM_VRAM) ||
            (umem->type & NVKM_MEM_KIND)) {
                int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
                                              handle, length, &umem->bar);
                if (ret)
                        return ret;

                *type = NVKM_OBJECT_MAP_IO;
        } else {
                return -EINVAL;
        }

        umem->io = (*type == NVKM_OBJECT_MAP_IO);
        return 0;
}

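/*
 * Object destructor: unlink the umem from its client's list and drop the
 * reference on the backing memory.  The allocation itself is returned to
 * the object core for freeing.
 */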
static void *
nvkm_umem_dtor(struct nvkm_object *object)
{
        struct nvkm_umem *umem = nvkm_umem(object);
        spin_lock(&umem->object.client->lock);
        list_del_init(&umem->head);
        spin_unlock(&umem->object.client->lock);
        nvkm_memory_unref(&umem->memory);
        return umem;
}

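/* Object method table for userspace-visible memory (umem) objects. */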
static const struct nvkm_object_func
nvkm_umem = {
        .dtor = nvkm_umem_dtor,
        .map = nvkm_umem_map,
        .unmap = nvkm_umem_unmap,
};

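/*
 * Constructor for NVIF memory objects: unpack the nvif_mem_v0 arguments,
 * validate the requested memory type, allocate the backing memory via
 * nvkm_mem_new_type(), link the object into the client's umem list, and
 * report the resulting page size, address and size back to userspace.
 */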
int
nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
              struct nvkm_object **pobject)
{
        struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
        union {
                struct nvif_mem_v0 v0;
        } *args = argv;
        struct nvkm_umem *umem;
        int type, ret = -ENOSYS;
        u8  page;
        u64 size;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
                type = args->v0.type;
                page = args->v0.page;
                size = args->v0.size;
        } else
                return ret;

        if (type >= mmu->type_nr)
                return -EINVAL;

        if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
        umem->mmu = mmu;
        umem->type = mmu->type[type].type;
        umem->priv = oclass->client->super;
        INIT_LIST_HEAD(&umem->head);
        *pobject = &umem->object;

        if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {
                page = max_t(u8, page, PAGE_SHIFT);
                umem->mappable = true;
        }

        ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,
                                &umem->memory);
        if (ret)
                return ret;

        spin_lock(&umem->object.client->lock);
        list_add(&umem->head, &umem->object.client->umem);
        spin_unlock(&umem->object.client->lock);

        args->v0.page = nvkm_memory_page(umem->memory);
        args->v0.addr = nvkm_memory_addr(umem->memory);
        args->v0.size = nvkm_memory_size(umem->memory);
        return 0;
}