/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

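/* Map the VRAM regions described by @vram into @vma, starting @delta
 * bytes into the virtual range.  Each region is written one page table
 * (PDE) span at a time, then the VM is flushed.
 */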
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	list_for_each_entry(r, &vram->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, vram, pte, len, phys);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				/* advance the physical address past the
				 * pages just mapped, so a region spanning
				 * a page table boundary continues where
				 * it left off rather than from its start
				 */
				phys += (u64)len << (bits + 12);
				pde++;
				pte = 0;
			}
		}
	}

	vm->flush(vm);
}

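/* Convenience wrapper: map @vram at offset zero of @vma. */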
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
{
	nouveau_vm_map_at(vma, 0, vram);
}

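/* Like nouveau_vm_map_at(), but backed by an array of DMA addresses
 * (system memory pages) rather than VRAM regions; maps @length bytes
 * from @list starting @delta bytes into @vma.
 */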
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  dma_addr_t *list)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, pte, list, len);

		num -= len;
		pte += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

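/* Clear the PTEs covering @length bytes starting @delta bytes into @vma
 * and flush the VM.  Page table references are not dropped here; that is
 * nouveau_vm_put()'s job.
 */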
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

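/* Convenience wrapper: unmap everything covered by @vma. */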
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

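/* Drop one reference on each page table covering PDEs @fpde..@lpde for
 * the given page size index @big.  Tables whose refcount reaches zero
 * are cleared from every linked PGD and released; the mm mutex is
 * temporarily dropped around the gpuobj release.  Caller must hold
 * vm->mm->mutex.
 */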
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
	}
}

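/* Allocate the page table backing PDE @pde for page type @type and
 * mirror it into every PGD linked to the VM.  Caller must hold
 * vm->mm->mutex; it is dropped around the allocation, so the refcount
 * is re-checked afterwards.
 */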
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm->mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm->mutex);
	if (unlikely(ret))
		return ret;

	/* someone else may have filled this PDE while the mutex was
	 * dropped for the allocation; if so, release our copy and take
	 * a reference on the existing page table instead
	 */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}

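/* Allocate a @size-byte range of virtual address space from @vm using
 * pages of (1 << @page_shift) bytes, and take references on the page
 * tables covering it.  A typical caller maps memory into the range and
 * later releases it, along these lines (hypothetical sketch; the error
 * handling and the NV_MEM_ACCESS_RW flag are assumptions, not taken
 * from this file):
 *
 *	struct nouveau_vma vma = {};
 *
 *	ret = nouveau_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
 *	if (ret == 0) {
 *		nouveau_vm_map(&vma, vram);
 *		...use the mapping...
 *		nouveau_vm_unmap(&vma);
 *		nouveau_vm_put(&vma);
 *	}
 */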
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm->mutex);
	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(vm->mm, vma->node);
			mutex_unlock(&vm->mm->mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm->mutex);

	vma->vm = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

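/* Release the virtual address range held by @vma, dropping the page
 * table references taken by nouveau_vm_get().  A vma with no node is a
 * no-op.
 */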
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm->mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm->mutex);
}

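/* Create a new address space covering [@offset, @offset + @length), with
 * allocations handed out starting at @mm_offset.  Chipset-specific page
 * table hooks are chosen from the card type; only the NV50 and NVC0
 * (Fermi) generations are handled here.
 */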
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block, pgt_bits;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 16;

		pgt_bits = 29;
		block = (1 << pgt_bits);
		if (length < block)
			block = length;

	} else
	if (dev_priv->card_type == NV_C0) {
		vm->map_pgt = nvc0_vm_map_pgt;
		vm->map = nvc0_vm_map;
		vm->map_sg = nvc0_vm_map_sg;
		vm->unmap = nvc0_vm_unmap;
		vm->flush = nvc0_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 17;
		pgt_bits = 27;

		/* should be 4096 everywhere; the larger minimum for big
		 * address spaces works around corruption seen when mixing
		 * small and large pages
		 */
		if (length < (1ULL << 40))
			block = 4096;
		else {
			block = (1 << pgt_bits);
			if (length < block)
				block = length;
		}
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	vm->fpde = offset >> pgt_bits;
	vm->lpde = (offset + length - 1) >> pgt_bits;
	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		/* don't leak the page table array allocated above */
		kfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}

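/* Add @pgd to the VM's PGD list, pre-populating it with the page tables
 * that already exist so it stays in sync with future table changes.
 */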
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm->mutex);
	return 0;
}

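/* Remove @pgd from the VM's PGD list and drop the reference taken by
 * nouveau_vm_link().
 */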
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	if (!pgd)
		return;

	mutex_lock(&vm->mm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj != pgd)
			continue;

		list_del(&vpgd->head);
		nouveau_gpuobj_ref(NULL, &vpgd->obj);
		kfree(vpgd);
	}
	mutex_unlock(&vm->mm->mutex);
}

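/* Tear down a VM once its last reference is gone: unlink any remaining
 * PGDs, finalise the allocator, and free the page table array.
 */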
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}
	WARN_ON(nouveau_mm_fini(&vm->mm) != 0);

	kfree(vm->pgt);
	kfree(vm);
}

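/* Exchange a VM reference: take a reference on @ref (linking @pgd into
 * it if supplied), store it in *@ptr, and drop the reference previously
 * held there, deleting that VM if it was the last one.
 */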
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}