/* linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c */
   1/*
   2 * Copyright 2017 Red Hat Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 */
  22#include "vmm.h"
  23
  24#include <nvif/ifb00d.h>
  25#include <nvif/unpack.h>
  26
/*
 * Mark "ptes" page-table entries, starting at index "ptei", as sparse.
 * Each PTE is 8 bytes, hence the ptei * 8 byte offset.
 */
static void
gm200_vmm_pgt_sparse(struct nvkm_vmm *vmm,
                     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	/* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);
}
  34
  35static const struct nvkm_vmm_desc_func
  36gm200_vmm_spt = {
  37        .unmap = gf100_vmm_pgt_unmap,
  38        .sparse = gm200_vmm_pgt_sparse,
  39        .mem = gf100_vmm_pgt_mem,
  40        .dma = gf100_vmm_pgt_dma,
  41        .sgl = gf100_vmm_pgt_sgl,
  42};
  43
  44static const struct nvkm_vmm_desc_func
  45gm200_vmm_lpt = {
  46        .invalid = gk104_vmm_lpt_invalid,
  47        .unmap = gf100_vmm_pgt_unmap,
  48        .sparse = gm200_vmm_pgt_sparse,
  49        .mem = gf100_vmm_pgt_mem,
  50};
  51
/*
 * Mark "pdes" page-directory entries, starting at index "pdei", as
 * sparse.  Each PDE is 8 bytes, hence the pdei * 8 byte offset.
 */
static void
gm200_vmm_pgd_sparse(struct nvkm_vmm *vmm,
                     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
	/* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
	VMM_FO064(pt, vmm, pdei * 8, BIT_ULL(35) /* VOL_BIG. */, pdes);
}
  59
  60static const struct nvkm_vmm_desc_func
  61gm200_vmm_pgd = {
  62        .unmap = gf100_vmm_pgt_unmap,
  63        .sparse = gm200_vmm_pgd_sparse,
  64        .pde = gf100_vmm_pgd_pde,
  65};
  66
/*
 * Page-table layout for 4KiB (2^12) small pages when the big-page size
 * is 128KiB (2^17).  Walked leaf-first (SPT, then PGD); the empty entry
 * terminates the list.
 * Fields are presumably { type, index bits, PTE size (8 bytes),
 * alignment, ops } — confirm against struct nvkm_vmm_desc in vmm.h.
 */
const struct nvkm_vmm_desc
gm200_vmm_desc_17_12[] = {
	{ SPT, 15, 8, 0x1000, &gm200_vmm_spt },
	{ PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
	{}
};
  73
/*
 * Page-table layout for 128KiB (2^17) big pages.  Walked leaf-first
 * (LPT, then PGD); the empty entry terminates the list.
 */
const struct nvkm_vmm_desc
gm200_vmm_desc_17_17[] = {
	{ LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
	{ PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
	{}
};
  80
/*
 * Page-table layout for 4KiB (2^12) small pages when the big-page size
 * is 64KiB (2^16).  Note the SPT/PGD index-bit split differs from the
 * 128KiB variant (14/14 instead of 15/13).
 */
const struct nvkm_vmm_desc
gm200_vmm_desc_16_12[] = {
	{ SPT, 14, 8, 0x1000, &gm200_vmm_spt },
	{ PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
	{}
};
  87
/*
 * Page-table layout for 64KiB (2^16) big pages.  Walked leaf-first
 * (LPT, then PGD); the empty entry terminates the list.
 */
const struct nvkm_vmm_desc
gm200_vmm_desc_16_16[] = {
	{ LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
	{ PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
	{}
};
  94
  95int
  96gm200_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
  97{
  98        if (vmm->func->page[1].shift == 16)
  99                base |= BIT_ULL(11);
 100        return gf100_vmm_join_(vmm, inst, base);
 101}
 102
/*
 * Default join: no extra base flags (gm200_vmm_join_() still applies
 * the 64KiB big-page bit when appropriate).
 */
int
gm200_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	return gm200_vmm_join_(vmm, inst, 0);
}
 108
 109static const struct nvkm_vmm_func
 110gm200_vmm_17 = {
 111        .join = gm200_vmm_join,
 112        .part = gf100_vmm_part,
 113        .aper = gf100_vmm_aper,
 114        .valid = gf100_vmm_valid,
 115        .flush = gf100_vmm_flush,
 116        .invalidate_pdb = gf100_vmm_invalidate_pdb,
 117        .page = {
 118                { 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
 119                { 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
 120                { 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SVHx },
 121                {}
 122        }
 123};
 124
 125static const struct nvkm_vmm_func
 126gm200_vmm_16 = {
 127        .join = gm200_vmm_join,
 128        .part = gf100_vmm_part,
 129        .aper = gf100_vmm_aper,
 130        .valid = gf100_vmm_valid,
 131        .flush = gf100_vmm_flush,
 132        .invalidate_pdb = gf100_vmm_invalidate_pdb,
 133        .page = {
 134                { 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
 135                { 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
 136                { 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SVHx },
 137                {}
 138        }
 139};
 140
/*
 * Common GM200-style VMM constructor: selects the big-page backend
 * requested by the client (argv/argc), then defers creation to
 * nvkm_vmm_new_().
 *
 * Returns 0 on success, -EINVAL for an unsupported big-page size, or
 * the error from argument unpacking / nvkm_vmm_new_().
 */
int
gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
               const struct nvkm_vmm_func *func_17,
               struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
               void *argv, u32 argc, struct lock_class_key *key,
               const char *name, struct nvkm_vmm **pvmm)
{
	const struct nvkm_vmm_func *func;
	union {
		struct gm200_vmm_vn vn;
		struct gm200_vmm_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;

	/* v0: client explicitly selects log2(big-page size), 16 or 17. */
	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		switch (args->v0.bigpage) {
		case 16: func = func_16; break;
		case 17: func = func_17; break;
		default:
			return -EINVAL;
		}
	} else
	/* vn: argument-less form, default to the 128KiB backend. */
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		func = func_17;
	} else
		return ret;

	return nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
}
 170
/*
 * Public constructor: big-page size is client-selectable (16/17) via
 * the gm200_vmm_v0 arguments, handled by gm200_vmm_new_().
 */
int
gm200_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
              void *argv, u32 argc, struct lock_class_key *key,
              const char *name, struct nvkm_vmm **pvmm)
{
	return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
	                      size, argv, argc, key, name, pvmm);
}
 179
/*
 * Constructor that delegates to gf100_vmm_new_() instead, i.e. with
 * GF100-style argument handling rather than GM200's.
 * NOTE(review): presumably for chips/paths that keep the older uAPI —
 * confirm against callers.
 */
int
gm200_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
                    void *argv, u32 argc, struct lock_class_key *key,
                    const char *name, struct nvkm_vmm **pvmm)
{
	return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
	                      size, argv, argc, key, name, pvmm);
}
 188