linux/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>

struct nvkm_vma {
	struct list_head head;
	struct rb_node tree;
	u64 addr;
	u64 size:50;
	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
	u8   page:3; /* Requested page type (index, or NONE for automatic). */
	u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
	bool used:1; /* Region allocated. */
	bool part:1; /* Region was split from an allocated region by map(). */
	bool user:1; /* Region user-allocated. */
	bool busy:1; /* Region busy (for temporarily preventing user access). */
	bool mapped:1; /* Region contains valid pages. */
	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
	struct nvkm_tags *tags; /* Compression tag reference. */
};

struct nvkm_vmm {
	const struct nvkm_vmm_func *func;
	struct nvkm_mmu *mmu;
	const char *name;
	u32 debug;
	struct kref kref;
	struct mutex mutex;

	u64 start;
	u64 limit;

	struct nvkm_vmm_pt *pd;
	struct list_head join;

	struct list_head list;
	struct rb_root free;
	struct rb_root root;

	bool bootstrapped;
	atomic_t engref[NVKM_SUBDEV_NR];

	dma_addr_t null;
	void *nullp;

	bool replay;
};
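
/*
 * Rough guide to the trackers above (not stated in this header; inferred
 * from nvkm/subdev/mmu/vmm.c, so treat as a best-effort note): "free"
 * appears to hold unallocated VA regions keyed for size-based lookup,
 * "root" the allocated regions keyed by address, and "list" every region
 * in address order, while "join" links the instance blocks attached via
 * nvkm_vmm_join().
 */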

int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
void nvkm_vmm_unref(struct nvkm_vmm **);
int nvkm_vmm_boot(struct nvkm_vmm *);
int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);

struct nvkm_vmm_map {
	struct nvkm_memory *memory;
	u64 offset;

	struct nvkm_mm_node *mem;
	struct scatterlist *sgl;
	dma_addr_t *dma;
	u64 *pfn;
	u64 off;

	const struct nvkm_vmm_page *page;

	struct nvkm_tags *tags;
	u64 next;
	u64 type;
	u64 ctag;
};

int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
		 struct nvkm_vmm_map *);
void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);

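/*
 * Hedged usage sketch of the VA interface above.  Illustration only, not
 * lifted from an in-tree caller; "vmm", "memory", "argv"/"argc" and the
 * 4KiB page shift are placeholders:
 *
 *	struct nvkm_vmm_map map = { .memory = memory, .offset = 0 };
 *	struct nvkm_vma *vma;
 *	int ret;
 *
 *	ret = nvkm_vmm_get(vmm, 12, nvkm_memory_size(memory), &vma);
 *	if (ret)
 *		return ret;
 *
 *	ret = nvkm_vmm_map(vmm, vma, argv, argc, &map);
 *	if (ret) {
 *		nvkm_vmm_put(vmm, &vma);
 *		return ret;
 *	}
 *
 *	... use the mapping at vma->addr ...
 *
 *	nvkm_vmm_unmap(vmm, vma);
 *	nvkm_vmm_put(vmm, &vma);
 *
 * In practice mappings usually go through the owning nvkm_memory object
 * (nvkm_memory_map()), which fills in the backend-specific nvkm_vmm_map
 * fields (mem/sgl/dma) before calling nvkm_vmm_map().
 */
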
struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);

struct nvkm_mmu {
	const struct nvkm_mmu_func *func;
	struct nvkm_subdev subdev;

	u8  dma_bits;

	int heap_nr;
	struct {
#define NVKM_MEM_VRAM                                                      0x01
#define NVKM_MEM_HOST                                                      0x02
#define NVKM_MEM_COMP                                                      0x04
#define NVKM_MEM_DISP                                                      0x08
		u8  type;
		u64 size;
	} heap[4];

	int type_nr;
	struct {
#define NVKM_MEM_KIND                                                      0x10
#define NVKM_MEM_MAPPABLE                                                  0x20
#define NVKM_MEM_COHERENT                                                  0x40
#define NVKM_MEM_UNCACHED                                                  0x80
		u8 type;
		u8 heap;
	} type[16];

	struct nvkm_vmm *vmm;

	struct {
		struct mutex mutex;
		struct list_head list;
	} ptc, ptp;

	struct nvkm_device_oclass user;
};

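/*
 * Constructors for each supported GPU generation; the per-chipset device
 * tables (under nvkm/engine/device) select the appropriate one.  The int
 * argument is presumably the subdev index handed to the common subdev
 * constructor (a best-effort note, not stated in this header).
 */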
int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif