1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <core/gpuobj.h>
26
27#include "nv04.h"
28
29#define NV04_PDMA_SIZE (128 * 1024 * 1024)
30#define NV04_PDMA_PAGE ( 4 * 1024)
31
32
33
34
35
36static void
37nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
38 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
39{
40 pte = 0x00008 + (pte * 4);
41 while (cnt) {
42 u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
43 u32 phys = (u32)*list++;
44 while (cnt && page--) {
45 nv_wo32(pgt, pte, phys | 3);
46 phys += NV04_PDMA_PAGE;
47 pte += 4;
48 cnt -= 1;
49 }
50 }
51}
52
53static void
54nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
55{
56 pte = 0x00008 + (pte * 4);
57 while (cnt--) {
58 nv_wo32(pgt, pte, 0x00000000);
59 pte += 4;
60 }
61}
62
/* No-op: NV04 PCIGART appears to need no explicit TLB/flush step after
 * page-table updates -- NOTE(review): presumably; confirm against HW docs. */
static void
nv04_vm_flush(struct nouveau_vm *vm)
{
}
67
68
69
70
71
/* Creating additional address spaces is not supported on this chipset;
 * the single PCIGART VM is built by the constructor below.  Always
 * fails with -EINVAL. */
int
nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
	       struct nouveau_vm **pvm)
{
	return -EINVAL;
}
78
79
80
81
82
/* Construct the NV04 VM manager: set up the vmmgr callbacks, create the
 * single 128MiB PCIGART address space, and allocate the DMA object that
 * holds its page table. */
static int
nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv04_vmmgr_priv *priv;
	struct nouveau_gpuobj *dma;
	int ret;

	ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
				   "pcigart", &priv);
	/* *pobject is set before the error check so the caller can tear
	 * down a partially-constructed object. */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* 32-bit DMA addresses, 4KiB pages only (small == large page). */
	priv->base.create = nv04_vm_create;
	priv->base.limit = NV04_PDMA_SIZE;
	priv->base.dma_bits = 32;
	priv->base.pgt_bits = 32 - 12;
	priv->base.spg_shift = 12;
	priv->base.lpg_shift = 12;
	priv->base.map_sg = nv04_vm_map_sg;
	priv->base.unmap = nv04_vm_unmap;
	priv->base.flush = nv04_vm_flush;

	ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
				&priv->vm);
	if (ret)
		return ret;

	/* One PTE (4 bytes) per 4KiB page, plus an 8-byte DMA object
	 * header written below. */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
				 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
				 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
				 &priv->vm->pgt[0].obj[0]);
	dma = priv->vm->pgt[0].obj[0];
	/* Refcount is pinned even on failure; the destructor drops it. */
	priv->vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	/* DMA object header: class/flags word, then the aperture limit. */
	nv_wo32(dma, 0x00000, 0x0002103d);
	nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
	return 0;
}
126
/* Tear down the NV04 VM manager: drop the page-table object and VM
 * references, free the scratch page if one was allocated, then destroy
 * the base object.  Order matters: the gpuobj ref must go before the
 * VM ref that owns its slot. */
void
nv04_vmmgr_dtor(struct nouveau_object *object)
{
	struct nv04_vmmgr_priv *priv = (void *)object;
	if (priv->vm) {
		nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
		nouveau_vm_ref(NULL, &priv->vm, NULL);
	}
	if (priv->nullp) {
		/* NOTE(review): nullp is not allocated in this file; the
		 * 16KiB size must match the allocating variant's ctor. */
		pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
				    priv->nullp, priv->null);
	}
	nouveau_vmmgr_destroy(&priv->base);
}
141
/* Class descriptor registering the NV04 VM subdev: custom ctor/dtor
 * above, generic vmmgr init/fini. */
struct nouveau_oclass
nv04_vmmgr_oclass = {
	.handle = NV_SUBDEV(VM, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_vmmgr_ctor,
		.dtor = nv04_vmmgr_dtor,
		.init = _nouveau_vmmgr_init,
		.fini = _nouveau_vmmgr_fini,
	},
};
152