#include "nv04.h"

#include <core/gpuobj.h>
#include <core/option.h>
#include <subdev/timer.h>

#define NV44_GART_SIZE (512 * 1024 * 1024)
#define NV44_GART_PAGE (  4 * 1024)

/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/
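
/* NV44 GART PTEs are 27 bits wide (bits 38:12 of the DMA address), with
 * four PTEs packed into each 16-byte group of four 32-bit words.  Updating
 * fewer than four entries therefore needs a read-modify-write of the whole
 * group, which is what this helper does; a NULL list points the affected
 * entries at the scratch (null) page instead of a caller-supplied address.
 */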
static void
nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
	     dma_addr_t *list, u32 pte, u32 cnt)
{
	u32 base = (pte << 2) & ~0x0000000f;
	u32 tmp[4];

	tmp[0] = nvkm_ro32(pgt, base + 0x0);
	tmp[1] = nvkm_ro32(pgt, base + 0x4);
	tmp[2] = nvkm_ro32(pgt, base + 0x8);
	tmp[3] = nvkm_ro32(pgt, base + 0xc);

	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (null >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	nvkm_wo32(pgt, base + 0x0, tmp[0]);
	nvkm_wo32(pgt, base + 0x4, tmp[1]);
	nvkm_wo32(pgt, base + 0x8, tmp[2]);
	nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
}
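
/* Write a list of DMA addresses into the GART page table.  Unaligned head
 * and tail entries go through the read-modify-write in nv44_vm_fill();
 * whole groups of four PTEs in between are packed and written directly,
 * with bit 30 of the final word set.
 */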
static void
nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
	u32 tmp[4];
	int i;

	nvkm_kmap(pgt);
	if (pte & 3) {
		u32  max = 4 - (pte & 3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_vm_fill(pgt, mmu->null, list, pte, part);
		pte  += part;
		list += part;
		cnt  -= part;
	}

	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nvkm_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
		nvkm_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
		nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
		nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
		cnt -= 4;
	}

	if (cnt)
		nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
	nvkm_done(pgt);
}
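
/* Unmap entries: partial groups are refilled pointing at the scratch page
 * (NULL list), while whole groups of four PTEs are simply zeroed.
 */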
static void
nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
	struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);

	nvkm_kmap(pgt);
	if (pte & 3) {
		u32  max = 4 - (pte & 3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
		pte += part;
		cnt -= part;
	}

	while (cnt >= 4) {
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		cnt -= 4;
	}

	if (cnt)
		nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
	nvkm_done(pgt);
}
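
/* Trigger a GART TLB flush (0x100808 bit 5) and poll until the hardware
 * sets the completion bit (bit 0), waiting at most two seconds.
 */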
static void
nv44_vm_flush(struct nvkm_vm *vm)
{
	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
	struct nvkm_device *device = mmu->base.subdev.device;
	nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
	nvkm_wr32(device, 0x100808, 0x00000020);
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100808) & 0x00000001)
			break;
	);
	nvkm_wr32(device, 0x100808, 0x00000000);
}

/*******************************************************************************
 * MMU subdev
 ******************************************************************************/

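/* One-time setup: allocate the scratch page that unmapped entries point
 * at, create the GART address space, and allocate its single page table
 * (four bytes per 4KiB page, i.e. 512KiB for the 512MiB GART, aligned to
 * 512KiB).
 */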
static int
nv44_mmu_oneinit(struct nvkm_mmu *base)
{
	struct nv04_mmu *mmu = nv04_mmu(base);
	struct nvkm_device *device = mmu->base.subdev.device;
	int ret;

	mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
					&mmu->null, GFP_KERNEL);
	if (!mmu->nullp) {
		nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
		mmu->null = 0;
	}

	ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
			     &mmu->vm);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
			      512 * 1024, true,
			      &mmu->vm->pgt[0].mem[0]);
	mmu->vm->pgt[0].refcount[0] = 1;
	return ret;
}

static void
nv44_mmu_init(struct nvkm_mmu *base)
{
	struct nv04_mmu *mmu = nv04_mmu(base);
	struct nvkm_device *device = mmu->base.subdev.device;
	struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
	u32 addr;

	/* calculate vram address of this PRAMIN block, object must be
	 * allocated on 512KiB alignment, and not exceed a total size
	 * of 512KiB for this to work correctly
	 */
	addr  = nvkm_rd32(device, 0x10020c);
	addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;

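	/* Program the dummy-page DMA address (0x100818), the GART size
	 * (0x100804) and the page table's vram address computed above
	 * (0x100800); the surrounding writes follow the existing bring-up
	 * sequence.
	 */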
	nvkm_wr32(device, 0x100850, 0x80000000);
	nvkm_wr32(device, 0x100818, mmu->null);
	nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
	nvkm_wr32(device, 0x100850, 0x00008000);
	nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
	nvkm_wr32(device, 0x100820, 0x00000000);
	nvkm_wr32(device, 0x10082c, 0x00000001);
	nvkm_wr32(device, 0x100800, addr | 0x00000010);
}

static const struct nvkm_mmu_func
nv44_mmu = {
	.dtor = nv04_mmu_dtor,
	.oneinit = nv44_mmu_oneinit,
	.init = nv44_mmu_init,
	.limit = NV44_GART_SIZE,
	.dma_bits = 39,
	.pgt_bits = 32 - 12,
	.spg_shift = 12,
	.lpg_shift = 12,
	.map_sg = nv44_vm_map_sg,
	.unmap = nv44_vm_unmap,
	.flush = nv44_vm_flush,
};
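
/* AGP boards, and boards with the "NvPCIE" config option disabled, fall
 * back to the nv04 MMU implementation rather than the PCIE GART.
 */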
int
nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
	if (device->type == NVKM_DEVICE_AGP ||
	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
		return nv04_mmu_new(device, index, pmmu);

	return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
}