linux/drivers/gpu/drm/nouveau/nvif/vmm.c
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <nvif/vmm.h>
#include <nvif/mem.h>

#include <nvif/if000c.h>

int
nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
{
        return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
                                &(struct nvif_vmm_unmap_v0) { .addr = addr },
                                sizeof(struct nvif_vmm_unmap_v0));
}

int
nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
             struct nvif_mem *mem, u64 offset)
{
        struct nvif_vmm_map_v0 *args;
        u8 stack[48];
        int ret;

        if (sizeof(*args) + argc > sizeof(stack)) {
                if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
                        return -ENOMEM;
        } else {
                args = (void *)stack;
        }

        args->version = 0;
        args->addr = addr;
        args->size = size;
        args->memory = nvif_handle(&mem->object);
        args->offset = offset;
        memcpy(args->data, argv, argc);

        ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
                               args, sizeof(*args) + argc);
        if (args != (void *)stack)
                kfree(args);
        return ret;
}

void
nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
{
        if (vma->size) {
                WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
                                         &(struct nvif_vmm_put_v0) {
                                                .addr = vma->addr,
                                         }, sizeof(struct nvif_vmm_put_v0)));
                vma->size = 0;
        }
}

int
nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
             u8 page, u8 align, u64 size, struct nvif_vma *vma)
{
        struct nvif_vmm_get_v0 args;
        int ret;

        args.version = vma->size = 0;
        args.sparse = sparse;
        args.page = page;
        args.align = align;
        args.size = size;

        switch (type) {
        case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
        case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
        case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
                               &args, sizeof(args));
        if (ret == 0) {
                vma->addr = args.addr;
                vma->size = args.size;
        }
        return ret;
}
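
/*
 * Illustrative sketch only, not part of the original file: a minimal,
 * hypothetical caller showing how the helpers above are typically combined.
 * It reserves a lazily-managed range sized for an existing nvif_mem
 * allocation, maps that memory into it, and releases the range again if the
 * map fails.  The function name example_vmm_bind() is an assumption made
 * for illustration, and real callers typically pass a per-GPU map-argument
 * structure via argv/argc rather than the NULL/0 used here for brevity.
 */
static int __maybe_unused
example_vmm_bind(struct nvif_vmm *vmm, struct nvif_mem *mem,
                 struct nvif_vma *vma)
{
        int ret;

        /* Reserve address space, sized and aligned for the backing memory. */
        ret = nvif_vmm_get(vmm, LAZY, false, mem->page, 0, mem->size, vma);
        if (ret)
                return ret;

        /* Map the whole allocation at the reserved address. */
        ret = nvif_vmm_map(vmm, vma->addr, vma->size, NULL, 0, mem, 0);
        if (ret)
                nvif_vmm_put(vmm, vma);
        return ret;
}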

void
nvif_vmm_fini(struct nvif_vmm *vmm)
{
        kfree(vmm->page);
        nvif_object_fini(&vmm->object);
}

int
nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
              void *argv, u32 argc, struct nvif_vmm *vmm)
{
        struct nvif_vmm_v0 *args;
        u32 argn = sizeof(*args) + argc;
        int ret = -ENOSYS, i;

        vmm->object.client = NULL;
        vmm->page = NULL;

        if (!(args = kmalloc(argn, GFP_KERNEL)))
                return -ENOMEM;
        args->version = 0;
        args->addr = addr;
        args->size = size;
        memcpy(args->data, argv, argc);

        ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
                               &vmm->object);
        if (ret)
                goto done;

        vmm->start = args->addr;
        vmm->limit = args->size;

        vmm->page_nr = args->page_nr;
        vmm->page = kmalloc_array(vmm->page_nr, sizeof(*vmm->page),
                                  GFP_KERNEL);
        if (!vmm->page) {
                ret = -ENOMEM;
                goto done;
        }

        for (i = 0; i < vmm->page_nr; i++) {
                struct nvif_vmm_page_v0 args = { .index = i };

                ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
                                       &args, sizeof(args));
                if (ret)
                        break;

                vmm->page[i].shift = args.shift;
                vmm->page[i].sparse = args.sparse;
                vmm->page[i].vram = args.vram;
                vmm->page[i].host = args.host;
                vmm->page[i].comp = args.comp;
        }

done:
        if (ret)
                nvif_vmm_fini(vmm);
        kfree(args);
        return ret;
}
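
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * wrapper around nvif_vmm_init() showing how a caller might consult the
 * per-page-size capabilities the constructor queries into vmm->page[].
 * The wrapper name, the host-mappable-page requirement and the -ENOSYS
 * return value are assumptions made for illustration.
 */
static int __maybe_unused
example_vmm_create(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
                   struct nvif_vmm *vmm)
{
        int ret, i;

        ret = nvif_vmm_init(mmu, oclass, addr, size, NULL, 0, vmm);
        if (ret)
                return ret;

        /* nvif_vmm_init() has filled vmm->start, vmm->limit and the
         * vmm->page[] capability array; accept the VMM only if at least
         * one page size supports system-memory mappings, undoing the
         * init otherwise.
         */
        for (i = 0; i < vmm->page_nr; i++) {
                if (vmm->page[i].host)
                        return 0;
        }

        nvif_vmm_fini(vmm);
        return -ENOSYS;
}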