linux/drivers/gpu/drm/nouveau/nouveau_mem.c
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_mem.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"

#include <drm/ttm/ttm_bo_driver.h>

#include <nvif/class.h>
#include <nvif/if000a.h>
#include <nvif/if500b.h>
#include <nvif/if500d.h>
#include <nvif/if900b.h>
#include <nvif/if900d.h>

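/*
 * Map the memory behind @mem into @vmm at the range described by @vma.
 * The map arguments are built per VMM class: NV04 takes none, NV50
 * carries kind/compression tags, and GF100-style VMMs additionally
 * mark non-VRAM backing as volatile.
 */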
int
nouveau_mem_map(struct nouveau_mem *mem,
                struct nvif_vmm *vmm, struct nvif_vma *vma)
{
        union {
                struct nv50_vmm_map_v0 nv50;
                struct gf100_vmm_map_v0 gf100;
        } args;
        u32 argc = 0;

        switch (vmm->object.oclass) {
        case NVIF_CLASS_VMM_NV04:
                break;
        case NVIF_CLASS_VMM_NV50:
                args.nv50.version = 0;
                args.nv50.ro = 0;
                args.nv50.priv = 0;
                args.nv50.kind = mem->kind;
                args.nv50.comp = mem->comp;
                argc = sizeof(args.nv50);
                break;
        case NVIF_CLASS_VMM_GF100:
        case NVIF_CLASS_VMM_GM200:
        case NVIF_CLASS_VMM_GP100:
                args.gf100.version = 0;
                if (mem->mem.type & NVIF_MEM_VRAM)
                        args.gf100.vol = 0;
                else
                        args.gf100.vol = 1;
                args.gf100.ro = 0;
                args.gf100.priv = 0;
                args.gf100.kind = mem->kind;
                argc = sizeof(args.gf100);
                break;
        default:
                WARN_ON(1);
                return -ENOSYS;
        }

        return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0);
}

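/*
 * Release both VMM mappings held by @mem and destroy the underlying
 * NVIF memory object; the destructor runs under the client's master
 * lock, matching the constructor paths below.
 */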
void
nouveau_mem_fini(struct nouveau_mem *mem)
{
        nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
        nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
        mutex_lock(&mem->cli->drm->master.lock);
        nvif_mem_dtor(&mem->mem);
        mutex_unlock(&mem->cli->drm->master.lock);
}

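/*
 * Back a TTM resource with host (system) memory.  A (non-)coherent
 * memory type is picked depending on whether coherent GPU mappings
 * are usable, kind/compression tags are dropped if the chosen type
 * cannot honour them, and the NVIF memory object is constructed from
 * either the ttm_tt's scatterlist or its DMA address array.
 */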
int
nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
{
        struct nouveau_mem *mem = nouveau_mem(reg);
        struct nouveau_cli *cli = mem->cli;
        struct nouveau_drm *drm = cli->drm;
        struct nvif_mmu *mmu = &cli->mmu;
        struct nvif_mem_ram_v0 args = {};
        u8 type;
        int ret;

        if (!nouveau_drm_use_coherent_gpu_mapping(drm))
                type = drm->ttm.type_ncoh[!!mem->kind];
        else
                type = drm->ttm.type_host[0];

        if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
                mem->comp = mem->kind = 0;
        if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {
                if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
                        mem->kind = mmu->kind[mem->kind];
                mem->comp = 0;
        }

        if (tt->sg)
                args.sgl = tt->sg->sgl;
        else
                args.dma = tt->dma_address;

        mutex_lock(&drm->master.lock);
        ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
                                 reg->num_pages << PAGE_SHIFT,
                                 &args, sizeof(args), &mem->mem);
        mutex_unlock(&drm->master.lock);
        return ret;
}

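/*
 * Back a TTM resource with VRAM.  The size is aligned to the requested
 * page size, class-specific arguments (contiguity, and bank swizzle on
 * NV50) are handed to the NVIF memory constructor, and the resource's
 * start offset is recorded from the resulting allocation.
 */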
int
nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
{
        struct nouveau_mem *mem = nouveau_mem(reg);
        struct nouveau_cli *cli = mem->cli;
        struct nouveau_drm *drm = cli->drm;
        struct nvif_mmu *mmu = &cli->mmu;
        u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
        int ret;

        mutex_lock(&drm->master.lock);
        switch (cli->mem->oclass) {
        case NVIF_CLASS_MEM_GF100:
                ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
                                         drm->ttm.type_vram, page, size,
                                         &(struct gf100_mem_v0) {
                                                .contig = contig,
                                         }, sizeof(struct gf100_mem_v0),
                                         &mem->mem);
                break;
        case NVIF_CLASS_MEM_NV50:
                ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
                                         drm->ttm.type_vram, page, size,
                                         &(struct nv50_mem_v0) {
                                                .bankswz = mmu->kind[mem->kind] == 2,
                                                .contig = contig,
                                         }, sizeof(struct nv50_mem_v0),
                                         &mem->mem);
                break;
        default:
                ret = -ENOSYS;
                WARN_ON(1);
                break;
        }
        mutex_unlock(&drm->master.lock);

        reg->start = mem->mem.addr >> PAGE_SHIFT;
        return ret;
}

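/* Drop the backing store and free the nouveau_mem wrapper itself. */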
void
nouveau_mem_del(struct ttm_resource *reg)
{
        struct nouveau_mem *mem = nouveau_mem(reg);

        nouveau_mem_fini(mem);
        kfree(mem);
}

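/*
 * Allocate the nouveau_mem wrapper for a new TTM resource.  Only the
 * owning client and the kind/compression tags are recorded here; the
 * backing store is attached later via nouveau_mem_host() or
 * nouveau_mem_vram().
 */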
int
nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
                struct ttm_resource **res)
{
        struct nouveau_mem *mem;

        if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
                return -ENOMEM;

        mem->cli = cli;
        mem->kind = kind;
        mem->comp = comp;

        *res = &mem->base;
        return 0;
}