linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <subdev/fb.h>
#include <subdev/ltc.h>

#include <nvif/ifc00d.h>
#include <nvif/unpack.h>

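/* Write a run of 64-bit PTEs.  map->type carries the attribute/comptag
 * template built by gp100_vmm_valid(), the target address is OR'd in
 * shifted right by 4, and each successive PTE advances by map->next.
 * map->type is bumped past this run's comptags so the next call carries
 * on where this one left off.
 */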
static inline void
gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        u64 data = (addr >> 4) | map->type;

        map->type += ptes * map->ctag;

        while (ptes--) {
                VMM_WO064(pt, vmm, ptei++ * 8, data);
                data += map->next;
        }
}

static void
gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

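/* Fast path for dma_addr_t arrays: when the mapping granularity matches
 * the CPU page size, each DMA address becomes exactly one PTE and the
 * array can be walked directly; otherwise fall back to the generic DMA
 * iterator.
 */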
static void
gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        if (map->page->shift == PAGE_SHIFT) {
                VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
                nvkm_kmap(pt->memory);
                while (ptes--) {
                        const u64 data = (*map->dma++ >> 4) | map->type;
                        VMM_WO064(pt, vmm, ptei++ * 8, data);
                        map->type += map->ctag;
                }
                nvkm_done(pt->memory);
                return;
        }

        VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
                     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        /* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
        VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_spt = {
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gp100_vmm_pgt_sparse,
        .mem = gp100_vmm_pgt_mem,
        .dma = gp100_vmm_pgt_dma,
        .sgl = gp100_vmm_pgt_sgl,
};

static void
gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
                      struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        /* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
        VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_lpt = {
        .invalid = gp100_vmm_lpt_invalid,
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gp100_vmm_pgt_sparse,
        .mem = gp100_vmm_pgt_mem,
};

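/* PD0 entries are 16 bytes wide.  Pages mapped directly at this level
 * (the 2MiB entries in gp100_vmm.page[] below) write the PTE into the
 * low 64 bits and clear the high 64 bits.
 */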
static inline void
gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        u64 data = (addr >> 4) | map->type;

        map->type += ptes * map->ctag;

        while (ptes--) {
                VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
                data += map->next;
        }
}

static void
gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
}

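/* Encode a page-table pointer for a PDE: target aperture in bits 2:1
 * (1 = VRAM, 2 = coherent host, 3 = non-coherent host), VOL set for
 * host memory, and the table address shifted right by 4.
 */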
static inline bool
gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
{
        switch (nvkm_memory_target(pt->memory)) {
        case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
        case NVKM_MEM_TARGET_HOST: *data |= 2ULL << 1;
                *data |= BIT_ULL(3); /* VOL. */
                break;
        case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
        default:
                WARN_ON(1);
                return false;
        }
        *data |= pt->addr >> 4;
        return true;
}

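/* Write both halves of a 128-bit PD0 entry: pgt->pt[0] (the large-page
 * table) in the low 64 bits and pgt->pt[1] (the small-page table, if
 * any) in the high 64 bits.
 */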
static void
gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
        struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
        struct nvkm_mmu_pt *pd = pgd->pt[0];
        u64 data[2] = {};

        if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
                return;
        if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
                return;

        nvkm_kmap(pd->memory);
        VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
        nvkm_done(pd->memory);
}

static void
gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
                     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
        /* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
        VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
}

static void
gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
                    struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
        VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd0 = {
        .unmap = gp100_vmm_pd0_unmap,
        .sparse = gp100_vmm_pd0_sparse,
        .pde = gp100_vmm_pd0_pde,
        .mem = gp100_vmm_pd0_mem,
};

static void
gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
        struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
        struct nvkm_mmu_pt *pd = pgd->pt[0];
        u64 data = 0;

        if (!gp100_vmm_pde(pgt->pt[0], &data))
                return;

        nvkm_kmap(pd->memory);
        VMM_WO064(pd, vmm, pdei * 8, data);
        nvkm_done(pd->memory);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd1 = {
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gp100_vmm_pgt_sparse,
        .pde = gp100_vmm_pd1_pde,
};

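/* Page-table layout descriptors, listed leaf-first.  Each entry gives the
 * level's type, index bits, bytes-per-PTE, table alignment and access
 * functions (struct nvkm_vmm_desc, vmm.h).  gp100_vmm_desc_16 is the walk
 * used when the leaf is a 64KiB large-page table, gp100_vmm_desc_12 when
 * it is a 4KiB small-page table; both share the PD levels above, and
 * gp100_vmm.page[] below selects the starting level for each page size.
 */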
const struct nvkm_vmm_desc
gp100_vmm_desc_16[] = {
        { LPT, 5,  8, 0x0100, &gp100_vmm_desc_lpt },
        { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
        {}
};

const struct nvkm_vmm_desc
gp100_vmm_desc_12[] = {
        { SPT, 9,  8, 0x1000, &gp100_vmm_desc_spt },
        { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
        {}
};

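/* Validate map arguments and build the PTE template in map->type:
 * bit 0 valid, bits 2:1 aperture, bit 3 volatile, bit 5 privileged,
 * bit 6 read-only, bits 63:56 kind, and, for compressible kinds, the
 * comptag index from bit 36.  Compression requires VRAM and a page size
 * that supports it; if no comptags can be allocated, the kind is demoted
 * to its uncompressed equivalent from the kind map.
 */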
int
gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
                struct nvkm_vmm_map *map)
{
        const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
        const struct nvkm_vmm_page *page = map->page;
        union {
                struct gp100_vmm_map_vn vn;
                struct gp100_vmm_map_v0 v0;
        } *args = argv;
        struct nvkm_device *device = vmm->mmu->subdev.device;
        struct nvkm_memory *memory = map->memory;
        u8  kind, priv, ro, vol;
        int kindn, aper, ret = -ENOSYS;
        const u8 *kindm;

        map->next = (1ULL << page->shift) >> 4;
        map->type = 0;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                vol  = !!args->v0.vol;
                ro   = !!args->v0.ro;
                priv = !!args->v0.priv;
                kind =   args->v0.kind;
        } else
        if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
                vol  = target == NVKM_MEM_TARGET_HOST;
                ro   = 0;
                priv = 0;
                kind = 0x00;
        } else {
                VMM_DEBUG(vmm, "args");
                return ret;
        }

        aper = vmm->func->aper(target);
        if (WARN_ON(aper < 0))
                return aper;

        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
        if (kind >= kindn || kindm[kind] == 0xff) {
                VMM_DEBUG(vmm, "kind %02x", kind);
                return -EINVAL;
        }

        if (kindm[kind] != kind) {
                u64 tags = nvkm_memory_size(memory) >> 16;
                if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
                        VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
                        return -EINVAL;
                }

                ret = nvkm_memory_tags_get(memory, device, tags,
                                           nvkm_ltc_tags_clear,
                                           &map->tags);
                if (ret) {
                        VMM_DEBUG(vmm, "comp %d", ret);
                        return ret;
                }

                if (map->tags->mn) {
                        tags = map->tags->mn->offset + (map->offset >> 16);
                        map->ctag |= ((1ULL << page->shift) >> 16) << 36;
                        map->type |= tags << 36;
                        map->next |= map->ctag;
                } else {
                        kind = kindm[kind];
                }
        }

        map->type |= BIT(0);
        map->type |= (u64)aper << 1;
        map->type |= (u64) vol << 3;
        map->type |= (u64)priv << 5;
        map->type |= (u64)  ro << 6;
        map->type |= (u64)kind << 56;
        return 0;
}

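/* TLB invalidate: translate page-table depth into the hardware's
 * cache-level field, where 5 covers everything up to PDE3.
 */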
void
gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
        gf100_vmm_flush_(vmm, 5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth);
}

int
gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
        const u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11); /* 64KiB */
        return gf100_vmm_join_(vmm, inst, base);
}

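/* Per-page-size capabilities: shift, starting descriptor level, and flags
 * for what nvkm_vmm permits at that size (sparse/VRAM/host/compression,
 * see vmm.h).  Only 64KiB and 2MiB pages support compression, and only
 * 4KiB pages may map host memory.
 */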
static const struct nvkm_vmm_func
gp100_vmm = {
        .join = gp100_vmm_join,
        .part = gf100_vmm_part,
        .aper = gf100_vmm_aper,
        .valid = gp100_vmm_valid,
        .flush = gp100_vmm_flush,
        .page = {
                { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
                { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
                { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
                { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
                { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
                { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
                {}
        }
};

int
gp100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
              struct lock_class_key *key, const char *name,
              struct nvkm_vmm **pvmm)
{
        return nv04_vmm_new_(&gp100_vmm, mmu, 0, addr, size,
                             argv, argc, key, name, pvmm);
}