linux/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>

struct nvkm_vma {
        struct list_head head;
        struct rb_node tree;
        u64 addr;
        u64 size:50;
        bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
        bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
        u8   page:3; /* Requested page type (index, or NONE for automatic). */
        u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
        bool used:1; /* Region allocated. */
        bool part:1; /* Region was split from an allocated region by map(). */
        bool busy:1; /* Region busy (for temporarily preventing user access). */
        bool mapped:1; /* Region contains valid pages. */
        struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
        struct nvkm_tags *tags; /* Compression tag reference. */
};
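
/*
 * Example (sketch, not part of the driver): reading the state bits
 * above.  A hypothetical predicate for "allocated but not currently
 * mapped" combines them like this; a freshly allocated region has
 * used=1, mapped=0 and no backing memory.
 */
static inline bool
nvkm_vma_idle_example(const struct nvkm_vma *vma)
{
        return vma->used && !vma->mapped && !vma->memory;
}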

struct nvkm_vmm {
        const struct nvkm_vmm_func *func;
        struct nvkm_mmu *mmu;
        const char *name;
        u32 debug;
        struct kref kref;
        struct mutex mutex;

        u64 start;
        u64 limit;

        struct nvkm_vmm_pt *pd;
        struct list_head join;

        struct list_head list;
        struct rb_root free;
        struct rb_root root;

        bool bootstrapped;
        atomic_t engref[NVKM_SUBDEV_NR];

        dma_addr_t null;
        void *nullp;

        bool replay;
};

int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
                 struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
void nvkm_vmm_unref(struct nvkm_vmm **);
int nvkm_vmm_boot(struct nvkm_vmm *);
int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
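
/*
 * Example (sketch, not part of the driver): allocating and releasing a
 * range with the calls above.  The "page" argument to nvkm_vmm_get()
 * is a page-size shift (e.g. 12 for 4KiB pages, as in-tree callers
 * pass); nvkm_vmm_put() unwinds the allocation and NULLs the pointer.
 */
static inline int
nvkm_vmm_get_example(struct nvkm_vmm *vmm, u64 size)
{
        struct nvkm_vma *vma;
        int ret = nvkm_vmm_get(vmm, 12, size, &vma);
        if (ret)
                return ret;
        /* ... vma->addr is now a valid GPU virtual address ... */
        nvkm_vmm_put(vmm, &vma); /* releases the range, sets vma = NULL */
        return 0;
}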

struct nvkm_vmm_map {
        struct nvkm_memory *memory;
        u64 offset;

        struct nvkm_mm_node *mem;
        struct scatterlist *sgl;
        dma_addr_t *dma;
        u64 *pfn;
        u64 off;

        const struct nvkm_vmm_page *page;

        struct nvkm_tags *tags;
        u64 next;
        u64 type;
        u64 ctag;
};

int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
                 struct nvkm_vmm_map *);
void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
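
/*
 * Example (sketch, not part of the driver): filling a nvkm_vmm_map for
 * nvkm_vmm_map().  One of ->mem/->sgl/->dma/->pfn describes the backing
 * pages; ->dma is shown here, mirroring the host-memory path.  argv/argc
 * carry a chipset-specific argument structure and may be NULL/0.  Error
 * handling trimmed to the essentials.
 */
static inline int
nvkm_vmm_map_example(struct nvkm_vmm *vmm, struct nvkm_memory *memory,
                     dma_addr_t *dma, u64 size, void *argv, u32 argc,
                     struct nvkm_vma **pvma)
{
        struct nvkm_vmm_map map = {
                .memory = memory, /* backing memory object */
                .offset = 0,      /* byte offset within it */
                .dma = dma,       /* list of DMA page addresses */
        };
        int ret = nvkm_vmm_get(vmm, 12, size, pvma);
        if (ret)
                return ret;
        ret = nvkm_vmm_map(vmm, *pvma, argv, argc, &map);
        if (ret)
                nvkm_vmm_put(vmm, pvma);
        return ret;
}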

struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);

struct nvkm_mmu {
        const struct nvkm_mmu_func *func;
        struct nvkm_subdev subdev;

        u8  dma_bits;

        int heap_nr;
        struct {
#define NVKM_MEM_VRAM                                                      0x01
#define NVKM_MEM_HOST                                                      0x02
#define NVKM_MEM_COMP                                                      0x04
#define NVKM_MEM_DISP                                                      0x08
                u8  type;
                u64 size;
        } heap[4];

        int type_nr;
        struct {
#define NVKM_MEM_KIND                                                      0x10
#define NVKM_MEM_MAPPABLE                                                  0x20
#define NVKM_MEM_COHERENT                                                  0x40
#define NVKM_MEM_UNCACHED                                                  0x80
                u8 type;
                u8 heap;
        } type[16];

        struct nvkm_vmm *vmm;

        struct {
                struct mutex mutex;
                struct list_head list;
        } ptc, ptp;

        struct mutex mutex; /* serialises mmu invalidations */

        struct nvkm_device_oclass user;
};
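
/*
 * Example (sketch, not part of the driver): scanning mmu->type[] with
 * the NVKM_MEM_* bits above.  Each entry combines its heap's placement
 * bits (VRAM/HOST/...) with per-type capability bits, so a hypothetical
 * search for compressible VRAM looks like this.
 */
static inline int
nvkm_mmu_comp_vram_type_example(const struct nvkm_mmu *mmu)
{
        int i;
        for (i = 0; i < mmu->type_nr; i++) {
                const u8 type = mmu->type[i].type;
                if ((type & NVKM_MEM_VRAM) && (type & NVKM_MEM_COMP))
                        return i; /* index usable as a memory-type handle */
        }
        return -1; /* no such type on this chipset */
}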

int nv04_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int mcp77_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gm200_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gm20b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
#endif