linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <core/client.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>
#include <engine/gr.h>

#include <nvif/ifc00d.h>
#include <nvif/unpack.h>

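/* Release the DMA mappings behind a range of PFN-mapped PTEs.  Entries
 * whose aperture field is non-zero point at DMA-mapped system memory
 * (see gp100_vmm_pgt_pfn()); recover the bus address from the PTE and
 * unmap it.
 */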
static void
gp100_vmm_pfn_unmap(struct nvkm_vmm *vmm,
                    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        struct device *dev = vmm->mmu->subdev.device->dev;
        dma_addr_t addr;

        nvkm_kmap(pt->memory);
        while (ptes--) {
                u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
                u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
                u64 data   = (u64)datahi << 32 | datalo;
                if ((data & (3ULL << 1)) != 0) {
                        addr = (data >> 8) << 12;
                        dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
                }
                ptei++;
        }
        nvkm_done(pt->memory);
}

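/* Clear the VALID bit on any present PTEs that target system memory so
 * the GPU stops accessing the pages.  Returns true if any were found,
 * i.e. a TLB flush and gp100_vmm_pfn_unmap() pass are still required.
 */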
static bool
gp100_vmm_pfn_clear(struct nvkm_vmm *vmm,
                    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        bool dma = false;
        nvkm_kmap(pt->memory);
        while (ptes--) {
                u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
                u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
                u64 data   = (u64)datahi << 32 | datalo;
                if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
                        VMM_WO064(pt, vmm, ptei * 8, data & ~BIT_ULL(0));
                        dma = true;
                }
                ptei++;
        }
        nvkm_done(pt->memory);
        return dma;
}

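/* Write PTEs from an array of CPU PFNs (map->pfn), the path used for
 * page-fault/SVM mappings.  System-memory pages are DMA-mapped here and
 * marked coherent + volatile; VRAM pages take their address directly
 * from the PFN entry.
 */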
static void
gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        struct device *dev = vmm->mmu->subdev.device->dev;
        dma_addr_t addr;

        nvkm_kmap(pt->memory);
        for (; ptes; ptes--, map->pfn++) {
                u64 data = 0;

                if (!(*map->pfn & NVKM_VMM_PFN_V))
                        continue;

                if (!(*map->pfn & NVKM_VMM_PFN_W))
                        data |= BIT_ULL(6); /* RO. */

                if (!(*map->pfn & NVKM_VMM_PFN_A))
                        data |= BIT_ULL(7); /* Atomic disable. */

                if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
                        addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
                        addr = dma_map_page(dev, pfn_to_page(addr), 0,
                                            PAGE_SIZE, DMA_BIDIRECTIONAL);
                        if (!WARN_ON(dma_mapping_error(dev, addr))) {
                                data |= addr >> 4;
                                data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
                                data |= BIT_ULL(3); /* VOL. */
                                data |= BIT_ULL(0); /* VALID. */
                        }
                } else {
                        data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
                        data |= BIT_ULL(0); /* VALID. */
                }

                VMM_WO064(pt, vmm, ptei++ * 8, data);
        }
        nvkm_done(pt->memory);
}

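/* Write 'ptes' consecutive 64-bit PTEs starting at 'ptei': the address
 * advances by map->next per entry and map->type carries the attribute
 * and comptag bits computed by gp100_vmm_valid().
 */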
static inline void
gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        u64 data = (addr >> 4) | map->type;

        map->type += ptes * map->ctag;

        while (ptes--) {
                VMM_WO064(pt, vmm, ptei++ * 8, data);
                data += map->next;
        }
}

static void
gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

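/* For DMA-mapped system memory: when the mapping page size equals the
 * CPU page size each DMA address fills exactly one PTE, so write them
 * directly; otherwise fall back to the generic iterator.
 */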
static void
gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        if (map->page->shift == PAGE_SHIFT) {
                VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
                nvkm_kmap(pt->memory);
                while (ptes--) {
                        const u64 data = (*map->dma++ >> 4) | map->type;
                        VMM_WO064(pt, vmm, ptei++ * 8, data);
                        map->type += map->ctag;
                }
                nvkm_done(pt->memory);
                return;
        }

        VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
                     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        /* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
        VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_spt = {
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gp100_vmm_pgt_sparse,
        .mem = gp100_vmm_pgt_mem,
        .dma = gp100_vmm_pgt_dma,
        .sgl = gp100_vmm_pgt_sgl,
        .pfn = gp100_vmm_pgt_pfn,
        .pfn_clear = gp100_vmm_pfn_clear,
        .pfn_unmap = gp100_vmm_pfn_unmap,
};

static void
gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
                      struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        /* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
        VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_lpt = {
        .invalid = gp100_vmm_lpt_invalid,
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gp100_vmm_pgt_sparse,
        .mem = gp100_vmm_pgt_mem,
};

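/* PD0 entries are 128 bits wide.  This variant is used when mapping
 * 2MiB pages directly at the PD0 level; only the low 64 bits carry the
 * mapping, the high half is written as zero.
 */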
static inline void
gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        u64 data = (addr >> 4) | map->type;

        map->type += ptes * map->ctag;

        while (ptes--) {
                VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
                data += map->next;
        }
}

static void
gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
}

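/* Encode a page-table pointer into a PDE: sets the aperture field from
 * the backing memory's target (VRAM/host/non-coherent) and folds in the
 * table's address.  Returns false for an unsupported memory target.
 */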
static inline bool
gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
{
        switch (nvkm_memory_target(pt->memory)) {
        case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
        case NVKM_MEM_TARGET_HOST: *data |= 2ULL << 1;
                *data |= BIT_ULL(3); /* VOL. */
                break;
        case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
        default:
                WARN_ON(1);
                return false;
        }
        *data |= pt->addr >> 4;
        return true;
}

static void
gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
        struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
        struct nvkm_mmu_pt *pd = pgd->pt[0];
        u64 data[2] = {};

        if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
                return;
        if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
                return;

        nvkm_kmap(pd->memory);
        VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
        nvkm_done(pd->memory);
}

static void
gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
                     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
        /* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
        VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
}

static void
gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
                    struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
        VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
}

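/* PD0 can also hold direct 2MiB PFN mappings; the following helpers
 * mirror the small-page PFN hooks above, but operate on 16-byte entries
 * and unmap/remap in 2MiB (1UL << 21) units.
 */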
static void
gp100_vmm_pd0_pfn_unmap(struct nvkm_vmm *vmm,
                        struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        struct device *dev = vmm->mmu->subdev.device->dev;
        dma_addr_t addr;

        nvkm_kmap(pt->memory);
        while (ptes--) {
                u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
                u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
                u64 data   = (u64)datahi << 32 | datalo;

                if ((data & (3ULL << 1)) != 0) {
                        addr = (data >> 8) << 12;
                        dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL);
                }
                ptei++;
        }
        nvkm_done(pt->memory);
}

static bool
gp100_vmm_pd0_pfn_clear(struct nvkm_vmm *vmm,
                        struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        bool dma = false;

        nvkm_kmap(pt->memory);
        while (ptes--) {
                u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
                u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
                u64 data   = (u64)datahi << 32 | datalo;

                if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
                        VMM_WO064(pt, vmm, ptei * 16, data & ~BIT_ULL(0));
                        dma = true;
                }
                ptei++;
        }
        nvkm_done(pt->memory);
        return dma;
}

static void
gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        struct device *dev = vmm->mmu->subdev.device->dev;
        dma_addr_t addr;

        nvkm_kmap(pt->memory);
        for (; ptes; ptes--, map->pfn++) {
                u64 data = 0;

                if (!(*map->pfn & NVKM_VMM_PFN_V))
                        continue;

                if (!(*map->pfn & NVKM_VMM_PFN_W))
                        data |= BIT_ULL(6); /* RO. */

                if (!(*map->pfn & NVKM_VMM_PFN_A))
                        data |= BIT_ULL(7); /* Atomic disable. */

                if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
                        addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
                        addr = dma_map_page(dev, pfn_to_page(addr), 0,
                                            1UL << 21, DMA_BIDIRECTIONAL);
                        if (!WARN_ON(dma_mapping_error(dev, addr))) {
                                data |= addr >> 4;
                                data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
                                data |= BIT_ULL(3); /* VOL. */
                                data |= BIT_ULL(0); /* VALID. */
                        }
                } else {
                        data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
                        data |= BIT_ULL(0); /* VALID. */
                }

                VMM_WO064(pt, vmm, ptei++ * 16, data);
        }
        nvkm_done(pt->memory);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd0 = {
        .unmap = gp100_vmm_pd0_unmap,
        .sparse = gp100_vmm_pd0_sparse,
        .pde = gp100_vmm_pd0_pde,
        .mem = gp100_vmm_pd0_mem,
        .pfn = gp100_vmm_pd0_pfn,
        .pfn_clear = gp100_vmm_pd0_pfn_clear,
        .pfn_unmap = gp100_vmm_pd0_pfn_unmap,
};

static void
gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
        struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
        struct nvkm_mmu_pt *pd = pgd->pt[0];
        u64 data = 0;

        if (!gp100_vmm_pde(pgt->pt[0], &data))
                return;

        nvkm_kmap(pd->memory);
        VMM_WO064(pd, vmm, pdei * 8, data);
        nvkm_done(pd->memory);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd1 = {
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gp100_vmm_pgt_sparse,
        .pde = gp100_vmm_pd1_pde,
};

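/* Page-table layouts, from leaf to root.  Both share the same four page
 * directory levels and differ only in the leaf table: a 64KiB-page LPT
 * for gp100_vmm_desc_16, a 4KiB-page SPT for gp100_vmm_desc_12.
 */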
const struct nvkm_vmm_desc
gp100_vmm_desc_16[] = {
        { LPT, 5,  8, 0x0100, &gp100_vmm_desc_lpt },
        { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
        {}
};

const struct nvkm_vmm_desc
gp100_vmm_desc_12[] = {
        { SPT, 9,  8, 0x1000, &gp100_vmm_desc_spt },
        { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
        {}
};

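/* Validate map arguments and build the PTE template in map->type:
 * VALID, aperture, volatility, privilege, read-only and kind bits, plus
 * comptag allocation for compressible kinds.
 */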
int
gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
                struct nvkm_vmm_map *map)
{
        const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
        const struct nvkm_vmm_page *page = map->page;
        union {
                struct gp100_vmm_map_vn vn;
                struct gp100_vmm_map_v0 v0;
        } *args = argv;
        struct nvkm_device *device = vmm->mmu->subdev.device;
        struct nvkm_memory *memory = map->memory;
        u8  kind, kind_inv, priv, ro, vol;
        int kindn, aper, ret = -ENOSYS;
        const u8 *kindm;

        map->next = (1ULL << page->shift) >> 4;
        map->type = 0;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                vol  = !!args->v0.vol;
                ro   = !!args->v0.ro;
                priv = !!args->v0.priv;
                kind =   args->v0.kind;
        } else
        if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
                vol  = target == NVKM_MEM_TARGET_HOST;
                ro   = 0;
                priv = 0;
                kind = 0x00;
        } else {
                VMM_DEBUG(vmm, "args");
                return ret;
        }

        aper = vmm->func->aper(target);
        if (WARN_ON(aper < 0))
                return aper;

        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
        if (kind >= kindn || kindm[kind] == kind_inv) {
                VMM_DEBUG(vmm, "kind %02x", kind);
                return -EINVAL;
        }

        if (kindm[kind] != kind) {
                u64 tags = nvkm_memory_size(memory) >> 16;
                if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
                        VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
                        return -EINVAL;
                }

                ret = nvkm_memory_tags_get(memory, device, tags,
                                           nvkm_ltc_tags_clear,
                                           &map->tags);
                if (ret) {
                        VMM_DEBUG(vmm, "comp %d", ret);
                        return ret;
                }

                if (map->tags->mn) {
                        tags = map->tags->mn->offset + (map->offset >> 16);
                        map->ctag |= ((1ULL << page->shift) >> 16) << 36;
                        map->type |= tags << 36;
                        map->next |= map->ctag;
                } else {
                        kind = kindm[kind];
                }
        }

        map->type |= BIT(0);
        map->type |= (u64)aper << 1;
        map->type |= (u64) vol << 3;
        map->type |= (u64)priv << 5;
        map->type |= (u64)  ro << 6;
        map->type |= (u64)kind << 56;
        return 0;
}

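/* GP100_VMM_VN_FAULT_CANCEL: cancel a targeted replayable fault, but
 * only if the faulting channel is still resident on the GR engine.
 */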
static int
gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
        struct nvkm_device *device = vmm->mmu->subdev.device;
        union {
                struct gp100_vmm_fault_cancel_v0 v0;
        } *args = argv;
        int ret = -ENOSYS;
        u32 inst, aper;

        if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
                return ret;

        /* Translate MaxwellFaultBufferA instance pointer to the same
         * format as the NV_GR_FECS_CURRENT_CTX register.
         */
        aper = (args->v0.inst >> 8) & 3;
        args->v0.inst >>= 12;
        args->v0.inst |= aper << 28;
        args->v0.inst |= 0x80000000;

        if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) {
                if ((inst = nvkm_gr_ctxsw_inst(device)) == args->v0.inst) {
                        gf100_vmm_invalidate(vmm, 0x0000001b
                                             /* CANCEL_TARGETED. */ |
                                             (args->v0.hub    << 20) |
                                             (args->v0.gpc    << 15) |
                                             (args->v0.client << 9));
                }
                WARN_ON(nvkm_gr_ctxsw_resume(device));
        }

        return 0;
}

static int
gp100_vmm_fault_replay(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
        union {
                struct gp100_vmm_fault_replay_vn vn;
        } *args = argv;
        int ret = -ENOSYS;

        if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
                gf100_vmm_invalidate(vmm, 0x0000000b); /* REPLAY_GLOBAL. */
        }

        return ret;
}

int
gp100_vmm_mthd(struct nvkm_vmm *vmm,
               struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
{
        switch (mthd) {
        case GP100_VMM_VN_FAULT_REPLAY:
                return gp100_vmm_fault_replay(vmm, argv, argc);
        case GP100_VMM_VN_FAULT_CANCEL:
                return gp100_vmm_fault_cancel(vmm, argv, argc);
        default:
                break;
        }
        return -EINVAL;
}

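/* Write the page directory base used by subsequent TLB invalidates
 * (low 32 bits to 0x100cb8, high bits to 0x100cec).
 */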
void
gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
{
        struct nvkm_device *device = vmm->mmu->subdev.device;
        nvkm_wr32(device, 0x100cb8, lower_32_bits(addr));
        nvkm_wr32(device, 0x100cec, upper_32_bits(addr));
}

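/* TLB invalidate for this VMM.  The cache level is derived from 'depth'
 * (how deep into the page-table hierarchy the update reached), and the
 * flush is limited to HUB clients while the BAR holds a reference.
 */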
void
gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
        u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
        if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
                type |= 0x00000004; /* HUB_ONLY */
        type |= 0x00000001; /* PAGE_ALL */
        gf100_vmm_invalidate(vmm, type);
}

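/* Fill in the instance block's MMU fields: version 2 page tables with a
 * 64KiB big-page size, optionally enabling TEX/GCC fault replay.
 */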
int
gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
        u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11) /* 64KiB */;
        if (vmm->replay) {
                base |= BIT_ULL(4); /* FAULT_REPLAY_TEX */
                base |= BIT_ULL(5); /* FAULT_REPLAY_GCC */
        }
        return gf100_vmm_join_(vmm, inst, base);
}

static const struct nvkm_vmm_func
gp100_vmm = {
        .join = gp100_vmm_join,
        .part = gf100_vmm_part,
        .aper = gf100_vmm_aper,
        .valid = gp100_vmm_valid,
        .flush = gp100_vmm_flush,
        .mthd = gp100_vmm_mthd,
        .invalidate_pdb = gp100_vmm_invalidate_pdb,
        .page = {
                { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
                { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
                { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
                { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
                { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
                { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
                {}
        }
};

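/* Common constructor shared with later chipsets: parse the optional
 * creation arguments (fault_replay) before allocating the VMM.
 */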
int
gp100_vmm_new_(const struct nvkm_vmm_func *func,
               struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
               void *argv, u32 argc, struct lock_class_key *key,
               const char *name, struct nvkm_vmm **pvmm)
{
        union {
                struct gp100_vmm_vn vn;
                struct gp100_vmm_v0 v0;
        } *args = argv;
        int ret = -ENOSYS;
        bool replay;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                replay = args->v0.fault_replay != 0;
        } else
        if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
                replay = false;
        } else
                return ret;

        ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
        if (ret)
                return ret;

        (*pvmm)->replay = replay;
        return 0;
}

int
gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
              void *argv, u32 argc, struct lock_class_key *key,
              const char *name, struct nvkm_vmm **pvmm)
{
        return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
                              argv, argc, key, name, pvmm);
}