linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "uvmm.h"
#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>

#include <nvif/if000c.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_uvmm;

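/*
 * Look up a client's object handle and, if it names a VMM object of the
 * nvkm_uvmm class, return the underlying struct nvkm_vmm.  On failure
 * the ERR_PTR from nvkm_object_search() is propagated to the caller.
 */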
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
{
        struct nvkm_object *object;

        object = nvkm_object_search(client, handle, &nvkm_uvmm);
        if (IS_ERR(object))
                return (void *)object;

        return nvkm_uvmm(object)->vmm;
}

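/*
 * NVIF_VMM_V0_PFNCLR: remove PFN mappings established via the PFNMAP
 * method.  Zero-sized requests succeed without touching the VMM; the
 * actual unmap runs under the VMM mutex.
 */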
static int
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_pfnclr_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        int ret = -ENOSYS;
        u64 addr, size;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
                size = args->v0.size;
        } else
                return ret;

        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
                mutex_unlock(&vmm->mutex);
        }

        return ret;
}

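/*
 * NVIF_VMM_V0_PFNMAP: map an array of raw PFNs into the VMM at the
 * given page shift.  The variable-length tail of the ioctl payload
 * must hold exactly one phys[] entry per page covered by
 * [addr, addr + size), which the argc check below enforces.
 */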
static int
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_pfnmap_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        int ret = -ENOSYS;
        u64 addr, size, *phys;
        u8  page;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
                page = args->v0.page;
                addr = args->v0.addr;
                size = args->v0.size;
                phys = args->v0.phys;
                if (argc != (size >> page) * sizeof(args->v0.phys[0]))
                        return -EINVAL;
        } else
                return ret;

        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
                mutex_unlock(&vmm->mutex);
        }

        return ret;
}

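/*
 * NVIF_VMM_V0_UNMAP: unmap the region whose base address matches
 * args->v0.addr exactly.  Fails if no such region exists, if it is
 * busy (a map is in flight), or if it has no backing memory to unmap.
 */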
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_unmap_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        u64 addr;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        vma = nvkm_vmm_node_search(vmm, addr);
        if (ret = -ENOENT, !vma || vma->addr != addr) {
                VMM_DEBUG(vmm, "lookup %016llx: %016llx",
                          addr, vma ? vma->addr : ~0ULL);
                goto done;
        }

        if (ret = -ENOENT, vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto done;
        }

        if (ret = -EINVAL, !vma->memory) {
                VMM_DEBUG(vmm, "unmapped");
                goto done;
        }

        nvkm_vmm_unmap_locked(vmm, vma, false);
        ret = 0;
done:
        mutex_unlock(&vmm->mutex);
        return ret;
}

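/*
 * NVIF_VMM_V0_MAP: map a range of an nvkm_memory object into an
 * existing region.  If the request covers only part of the region,
 * the region is split first.  vma->busy is held across the unlocked
 * nvkm_memory_map() call and, per the comment below, cleared by the
 * backend on success, or here on failure.
 */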
static int
nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_map_v0 v0;
        } *args = argv;
        u64 addr, size, handle, offset;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        struct nvkm_memory *memory;
        int ret = -ENOSYS;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
                addr = args->v0.addr;
                size = args->v0.size;
                handle = args->v0.memory;
                offset = args->v0.offset;
        } else
                return ret;

        memory = nvkm_umem_search(client, handle);
        if (IS_ERR(memory)) {
                VMM_DEBUG(vmm, "memory %016llx %ld", handle, PTR_ERR(memory));
                return PTR_ERR(memory);
        }

        mutex_lock(&vmm->mutex);
        if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
                VMM_DEBUG(vmm, "lookup %016llx", addr);
                goto fail;
        }

        if (ret = -ENOENT, vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto fail;
        }

        if (ret = -EINVAL, vma->mapped && !vma->memory) {
                VMM_DEBUG(vmm, "pfnmap %016llx", addr);
                goto fail;
        }

        if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
                if (addr + size > vma->addr + vma->size || vma->memory ||
                    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
                        VMM_DEBUG(vmm, "split %d %d %d "
                                       "%016llx %016llx %016llx %016llx",
                                  !!vma->memory, vma->refd, vma->mapref,
                                  addr, size, vma->addr, (u64)vma->size);
                        goto fail;
                }

                vma = nvkm_vmm_node_split(vmm, vma, addr, size);
                if (!vma) {
                        ret = -ENOMEM;
                        goto fail;
                }
        }
        vma->busy = true;
        mutex_unlock(&vmm->mutex);

        ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
        if (ret == 0) {
                /* Successful map will clear vma->busy. */
                nvkm_memory_unref(&memory);
                return 0;
        }

        mutex_lock(&vmm->mutex);
        vma->busy = false;
        nvkm_vmm_unmap_region(vmm, vma);
fail:
        mutex_unlock(&vmm->mutex);
        nvkm_memory_unref(&memory);
        return ret;
}

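/*
 * NVIF_VMM_V0_PUT: release a region allocated by the GET method.  The
 * address must name the start of a whole region (not a split-off
 * part), and the region must not have a map in flight.
 */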
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_put_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        u64 addr;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        vma = nvkm_vmm_node_search(vmm, addr);
        if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
                VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
                          vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
                goto done;
        }

        if (ret = -ENOENT, vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto done;
        }

        nvkm_vmm_put_locked(vmm, vma);
        ret = 0;
done:
        mutex_unlock(&vmm->mutex);
        return ret;
}

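/*
 * NVIF_VMM_V0_GET: allocate a region of address space.  getref/mapref
 * are derived from the requested type (PTES vs. ADDR) and passed to
 * nvkm_vmm_get_locked(); on success the region's base address is
 * written back into the ioctl payload.
 */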
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_get_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        bool getref, mapref, sparse;
        u8 page, align;
        u64 size;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
                mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
                sparse = args->v0.sparse;
                page = args->v0.page;
                align = args->v0.align;
                size = args->v0.size;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
                                  page, align, size, &vma);
        mutex_unlock(&vmm->mutex);
        if (ret)
                return ret;

        args->v0.addr = vma->addr;
        return ret;
}

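/*
 * NVIF_VMM_V0_PAGE: describe one entry of the VMM's page-size table
 * (shift plus sparse/vram/host/comp capability flags), so clients can
 * enumerate the page sizes the backend supports.
 */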
static int
nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_page_v0 v0;
        } *args = argv;
        const struct nvkm_vmm_page *page;
        int ret = -ENOSYS;
        u8 type, index, nr;

        page = uvmm->vmm->func->page;
        for (nr = 0; page[nr].shift; nr++);

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                if ((index = args->v0.index) >= nr)
                        return -EINVAL;
                type = page[index].type;
                args->v0.shift = page[index].shift;
                args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
                args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
                args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
                args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
        } else
                return -ENOSYS;

        return 0;
}

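/*
 * Top-level method dispatch for the VMM object.  Generic methods are
 * handled here; mthd values in the NVIF_VMM_V0_MTHD(0x00..0x7f) range
 * are forwarded to the backend-specific handler, if the backend
 * provides one.
 */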
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
        struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
        switch (mthd) {
        case NVIF_VMM_V0_PAGE  : return nvkm_uvmm_mthd_page  (uvmm, argv, argc);
        case NVIF_VMM_V0_GET   : return nvkm_uvmm_mthd_get   (uvmm, argv, argc);
        case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
        case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
        case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
        case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
        case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
        case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
                if (uvmm->vmm->func->mthd) {
                        return uvmm->vmm->func->mthd(uvmm->vmm,
                                                     uvmm->object.client,
                                                     mthd, argv, argc);
                }
                break;
        default:
                break;
        }
        return -EINVAL;
}

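/* Object destructor: drop the VMM reference taken at creation time. */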
static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
        struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
        nvkm_vmm_unref(&uvmm->vmm);
        return uvmm;
}

static const struct nvkm_object_func
nvkm_uvmm = {
        .dtor = nvkm_uvmm_dtor,
        .mthd = nvkm_uvmm_mthd,
};

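/*
 * Object constructor for the NVIF VMM class.  If the MMU does not
 * expose a shared kernel VMM, a new VMM is constructed from the
 * ioctl's managed/addr/size arguments; otherwise the existing VMM is
 * referenced, in which case a non-zero size is rejected.  The number
 * of supported page sizes and the VMM's address range are written
 * back into the ioctl payload for the client.
 */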
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
              struct nvkm_object **pobject)
{
        struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
        const bool more = oclass->base.maxver >= 0;
        union {
                struct nvif_vmm_v0 v0;
        } *args = argv;
        const struct nvkm_vmm_page *page;
        struct nvkm_uvmm *uvmm;
        int ret = -ENOSYS;
        u64 addr, size;
        bool managed;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
                managed = args->v0.managed != 0;
                addr = args->v0.addr;
                size = args->v0.size;
        } else
                return ret;

        if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
        *pobject = &uvmm->object;

        if (!mmu->vmm) {
                ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
                                          NULL, "user", &uvmm->vmm);
                if (ret)
                        return ret;

                uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
        } else {
                if (size)
                        return -EINVAL;

                uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
        }

        page = uvmm->vmm->func->page;
        args->v0.page_nr = 0;
        while (page && (page++)->shift)
                args->v0.page_nr++;
        args->v0.addr = uvmm->vmm->start;
        args->v0.size = uvmm->vmm->limit;
        return 0;
}