1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include "drmP.h"
34#include "drm.h"
35#include "nouveau_drv.h"
36#include "nouveau_drm.h"
37#include "nouveau_ramht.h"
38#include "nouveau_vm.h"
39
/* Per-class software method: maps a method offset to a handler function,
 * looked up and invoked by nouveau_gpuobj_mthd_call(). */
struct nouveau_gpuobj_method {
	struct list_head head;	/* entry in nouveau_gpuobj_class.methods */
	u32 mthd;		/* method offset this handler services */
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};
45
/* Object class registered via nouveau_gpuobj_class_new(); owns a list of
 * software methods and records which engine services the class. */
struct nouveau_gpuobj_class {
	struct list_head head;		/* entry in dev_priv->classes */
	struct list_head methods;	/* list of nouveau_gpuobj_method */
	u32 id;				/* object class identifier */
	u32 engine;			/* NVOBJ_ENGINE_* owning this class */
};
52
53int
54nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
55{
56 struct drm_nouveau_private *dev_priv = dev->dev_private;
57 struct nouveau_gpuobj_class *oc;
58
59 oc = kzalloc(sizeof(*oc), GFP_KERNEL);
60 if (!oc)
61 return -ENOMEM;
62
63 INIT_LIST_HEAD(&oc->methods);
64 oc->id = class;
65 oc->engine = engine;
66 list_add(&oc->head, &dev_priv->classes);
67 return 0;
68}
69
70int
71nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
72 int (*exec)(struct nouveau_channel *, u32, u32, u32))
73{
74 struct drm_nouveau_private *dev_priv = dev->dev_private;
75 struct nouveau_gpuobj_method *om;
76 struct nouveau_gpuobj_class *oc;
77
78 list_for_each_entry(oc, &dev_priv->classes, head) {
79 if (oc->id == class)
80 goto found;
81 }
82
83 return -EINVAL;
84
85found:
86 om = kzalloc(sizeof(*om), GFP_KERNEL);
87 if (!om)
88 return -ENOMEM;
89
90 om->mthd = mthd;
91 om->exec = exec;
92 list_add(&om->head, &oc->methods);
93 return 0;
94}
95
96int
97nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
98 u32 class, u32 mthd, u32 data)
99{
100 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
101 struct nouveau_gpuobj_method *om;
102 struct nouveau_gpuobj_class *oc;
103
104 list_for_each_entry(oc, &dev_priv->classes, head) {
105 if (oc->id != class)
106 continue;
107
108 list_for_each_entry(om, &oc->methods, head) {
109 if (om->mthd == mthd)
110 return om->exec(chan, class, mthd, data);
111 }
112 }
113
114 return -ENOENT;
115}
116
/* Look up channel <chid> and dispatch the software method on it.  The
 * channel table is sampled and the handler run under channels.lock, so
 * the channel cannot be torn down while the handler executes.  Returns
 * the handler's result, or -EINVAL for an out-of-range/unused channel.
 *
 * NOTE(review): the "chid > 0" test below rejects channel 0 --
 * presumably it is reserved, but confirm this is not an off-by-one
 * ("chid >= 0") against callers that can legitimately pass channel 0. */
int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid > 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
/* Allocate a new gpuobj of <size> bytes with <align> alignment.
 *
 * chan != NULL: the object is suballocated from the channel's private
 * PRAMIN heap (cinst is the offset within the channel's instance mem).
 * chan == NULL: the object is allocated from global instance memory
 * through the instmem engine and, unless NVOBJ_FLAG_DONT_MAP is set,
 * CPU-mapped as well.
 *
 * NVOBJ_FLAG_ZERO_ALLOC zero-fills the new object.  Returns 0 and
 * stores the object in *gpuobj_ret, or a negative errno; on failure the
 * half-built object is torn down via nouveau_gpuobj_ref(NULL, ...). */
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	/* publish on the global list up-front so the error paths below
	 * can use the regular nouveau_gpuobj_ref(NULL, ...) teardown */
	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (chan) {
		/* carve a block out of the channel's private PRAMIN heap */
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		/* pinst == ~0 marks instance memory with no CPU mapping,
		 * reached through the PRAMIN window instead (see nv_wo32) */
		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node = ramin;
	} else {
		/* global object: backed directly by the instmem engine */
		ret = instmem->get(gpuobj, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		/* mapping is optional: a failed/skipped map just leaves
		 * pinst = ~0 and accesses go via the PRAMIN window */
		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}
235
236int
237nouveau_gpuobj_init(struct drm_device *dev)
238{
239 struct drm_nouveau_private *dev_priv = dev->dev_private;
240
241 NV_DEBUG(dev, "\n");
242
243 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
244 INIT_LIST_HEAD(&dev_priv->classes);
245 spin_lock_init(&dev_priv->ramin_lock);
246 dev_priv->ramin_base = ~0;
247
248 return 0;
249}
250
251void
252nouveau_gpuobj_takedown(struct drm_device *dev)
253{
254 struct drm_nouveau_private *dev_priv = dev->dev_private;
255 struct nouveau_gpuobj_method *om, *tm;
256 struct nouveau_gpuobj_class *oc, *tc;
257
258 NV_DEBUG(dev, "\n");
259
260 list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
261 list_for_each_entry_safe(om, tm, &oc->methods, head) {
262 list_del(&om->head);
263 kfree(om);
264 }
265 list_del(&oc->head);
266 kfree(oc);
267 }
268
269 BUG_ON(!list_empty(&dev_priv->gpuobj_list));
270}
271
272
/* kref release callback: final teardown of a gpuobj once the last
 * reference is dropped (via nouveau_gpuobj_ref(NULL, &obj)). */
static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	/* optionally scrub the instance memory before it is recycled */
	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	/* class-specific destructor hook, if one was installed */
	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	/* return backing storage: global objects go back to the instmem
	 * engine, channel-local ones to the channel's PRAMIN heap */
	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	/* unlink from the global object list, then free */
	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}
313
/* Make *ptr reference <ref> (which may be NULL), fixing up refcounts.
 *
 * The new object is referenced BEFORE the old one is released, so the
 * call remains safe even when *ptr already equals ref.  Dropping the
 * last reference frees the object through nouveau_gpuobj_del(). */
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}
325
326int
327nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
328 u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
329{
330 struct drm_nouveau_private *dev_priv = dev->dev_private;
331 struct nouveau_gpuobj *gpuobj = NULL;
332 int i;
333
334 NV_DEBUG(dev,
335 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
336 pinst, vinst, size, flags);
337
338 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
339 if (!gpuobj)
340 return -ENOMEM;
341 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
342 gpuobj->dev = dev;
343 gpuobj->flags = flags;
344 kref_init(&gpuobj->refcount);
345 gpuobj->size = size;
346 gpuobj->pinst = pinst;
347 gpuobj->cinst = NVOBJ_CINST_GLOBAL;
348 gpuobj->vinst = vinst;
349
350 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
351 for (i = 0; i < gpuobj->size; i += 4)
352 nv_wo32(gpuobj, i, 0);
353 dev_priv->engine.instmem.flush(dev);
354 }
355
356 spin_lock(&dev_priv->ramin_lock);
357 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
358 spin_unlock(&dev_priv->ramin_lock);
359 *pgpuobj = gpuobj;
360 return 0;
361}
362
363
364static uint32_t
365nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
366{
367 struct drm_nouveau_private *dev_priv = dev->dev_private;
368
369
370 if (dev_priv->card_type >= NV_50)
371 return 24;
372 if (dev_priv->card_type >= NV_40)
373 return 32;
374 return 16;
375}
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
/* Write a six-word NV50 DMA object descriptor into <obj> at <offset>.
 *
 * The first word packs compression (comp), storage type, access rights,
 * target and the object class; base/size describe the address window.
 * The magic bit values are hardware-defined. */
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0 = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;	/* cleared again below for GART/other targets */

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->gart_info.aper_base;
		/* fallthrough: GART shares the default target encoding */
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* hardware wants the inclusive end address, not a length */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}
455
456int
457nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
458 int target, int access, u32 type, u32 comp,
459 struct nouveau_gpuobj **pobj)
460{
461 struct drm_device *dev = chan->dev;
462 int ret;
463
464 ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
465 if (ret)
466 return ret;
467
468 nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
469 access, type, comp);
470 return 0;
471}
472
/* Build a DMA object (ctxdma) covering <base>..<base+size> with the
 * given access rights and memory target, owned by <chan>.
 *
 * NV50+ delegates to nv50_gpuobj_dma_new().  On earlier chips the
 * 16-byte NV04-style descriptor is assembled here.  A GART target is
 * rewritten first: AGP becomes PCI_NOSNOOP at an aperture offset, SGDMA
 * with a non-zero base becomes a PCI target at the page's physical
 * address, and base == 0 simply takes a reference on the pre-built
 * sg_ctxdma object. */
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		/* VM targets use the VM compression/storage encodings */
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			target = NV_MEM_TARGET_PCI_NOSNOOP;
			base += dev_priv->gart_info.aper_base;
		} else
		if (base != 0) {
			base = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			/* whole-aperture request: reuse the SGDMA ctxdma */
			nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
			return 0;
		}
	}

	flags0 = class;
	flags0 |= 0x00003000;	/* hardware-defined format bits */
	flags2 = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
		/* fallthrough: WO and RW (default) both set flags2 bit 1 */
	default:
		flags2 |= 0x00000002;
		break;
	}

	/* low 12 bits of the base live in flags0, the rest in flags2 */
	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);	/* inclusive limit */
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class = class;
	*pobj = obj;
	return 0;
}
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601static int
602nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
603 struct nouveau_gpuobj **gpuobj_ret)
604{
605 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
606 struct nouveau_gpuobj *gpuobj;
607
608 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
609 if (!gpuobj)
610 return -ENOMEM;
611 gpuobj->dev = chan->dev;
612 gpuobj->engine = NVOBJ_ENGINE_SW;
613 gpuobj->class = class;
614 kref_init(&gpuobj->refcount);
615 gpuobj->cinst = 0x40;
616
617 spin_lock(&dev_priv->ramin_lock);
618 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
619 spin_unlock(&dev_priv->ramin_lock);
620 *gpuobj_ret = gpuobj;
621 return 0;
622}
623
/* Instantiate an object of <class> on <chan> and hook it into the
 * channel's RAMHT under <handle>.
 *
 * The class must have been registered via nouveau_gpuobj_class_new().
 * Depending on the owning engine this may create a software object,
 * lazily create the engine's per-channel context, and (pre-NVC0) build
 * the class's instance-memory representation. */
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
	return -EINVAL;

found:
	switch (oc->engine) {
	case NVOBJ_ENGINE_SW:
		/* pre-Fermi software classes get a dummy gpuobj that only
		 * exists to occupy a RAMHT slot */
		if (dev_priv->card_type < NV_C0) {
			ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
			if (ret)
				return ret;
			goto insert;
		}
		break;
	case NVOBJ_ENGINE_GR:
		/* create the channel's graphics context on first use */
		if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
		    (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
			struct nouveau_pgraph_engine *pgraph =
				&dev_priv->engine.graph;

			ret = pgraph->create_context(chan);
			if (ret)
				return ret;
		}
		break;
	case NVOBJ_ENGINE_CRYPT:
		/* likewise, lazily create the crypt engine's context */
		if (!chan->crypt_ctx) {
			struct nouveau_crypt_engine *pcrypt =
				&dev_priv->engine.crypt;

			ret = pcrypt->create_context(chan);
			if (ret)
				return ret;
		}
		break;
	}

	/* NVC0+: no instance-memory object (or RAMHT entry) is created */
	if (dev_priv->card_type >= NV_C0)
		return 0;

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 &gpuobj);
	if (ret) {
		NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
		return ret;
	}

	/* fill in the chipset-specific object layout */
	if (dev_priv->card_type >= NV_50) {
		nv_wo32(gpuobj, 0, class);
		nv_wo32(gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(gpuobj, 0, 0x00001030);
			nv_wo32(gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				/* presumably the byte-swap enable -- only
				 * written on big-endian builds */
				nv_wo32(gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	gpuobj->engine = oc->engine;
	gpuobj->class = oc->id;

insert:
	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	if (ret)
		NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
	/* drop the local reference; on success the RAMHT entry presumably
	 * holds its own (see nouveau_ramht_insert) */
	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
}
726
/* Allocate the channel's private instance-memory (PRAMIN) block and set
 * up a drm_mm heap over it for later per-object suballocations. */
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* base reservation for fixed per-channel structures */
	size = 0x2000;
	base = 0;

	/* room for the graphics context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* NOTE(review): extra NV50 reservations (0x1400 + 0x4000 +
		 * 0x8000 + 0x1000, heap starting at 0x6000) -- presumably
		 * page directory, RAMHT and related tables; confirm
		 * against the NV50 channel layout before changing */
		size += 0x1400;
		size += 0x4000;
		base = 0x6000;

		size += 0x8000;

		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	/* heap over [base, base+size) for per-channel suballocations */
	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}
771
/* Set up all per-channel gpuobj state: instance memory, address space
 * (page directory), RAMHT, and the default VRAM/GART DMA objects
 * inserted into RAMHT under <vram_h> and <tt_h>. */
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	if (dev_priv->card_type == NV_C0) {
		/* NVC0: only the instance block is needed; point it at the
		 * shared VM's page directory and return -- no RAMHT or
		 * ctxdmas are created on this path */
		struct nouveau_vm *vm = dev_priv->chan_vm;
		struct nouveau_vm_pgd *vpgd;

		ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
					 &chan->ramin);
		if (ret)
			return ret;

		nouveau_vm_ref(vm, &chan->vm, NULL);

		vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
		nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
		nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
		nv_wo32(chan->ramin, 0x0208, 0xffffffff);
		nv_wo32(chan->ramin, 0x020c, 0x000000ff);
		return 0;
	}

	/* allocate the channel's PRAMIN area and suballocation heap */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* wrap the page-directory region inside the channel's PRAMIN in a
	 * fake gpuobj, and join the shared channel VM with it */
	if (dev_priv->chan_vm) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;

		/* pinst == ~0 means no CPU mapping; keep it that way */
		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;

		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
	}

	/* RAMHT: shared global table pre-NV50, a private per-channel
	 * 0x8000-byte table on NV50+ */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* default VRAM ctxdma: full 40-bit VM window on NV50+, the whole
	 * of VRAM on earlier chips */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);	/* drop the local reference */
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* default GART/TT ctxdma, same pattern as VRAM above */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);	/* drop the local reference */
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}
899
/* Release all per-channel gpuobj state created by
 * nouveau_gpuobj_channel_init(). */
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	nouveau_ramht_ref(NULL, &chan->ramht, chan);

	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);

	/* the heap may never have been created (the NVC0 init path skips
	 * init_pramin); free_stack.next is presumably non-NULL only after
	 * drm_mm_init() has run */
	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}
916
/* Snapshot every global gpuobj's contents into a vmalloc'd shadow
 * buffer ahead of suspend.  Channel-local objects are suballocated from
 * their channel's (global) PRAMIN object, so skipping them loses
 * nothing.  If any allocation fails, the snapshots taken so far are
 * written back and released via nouveau_gpuobj_resume() before
 * returning -ENOMEM. */
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			/* undo the snapshots taken so far */
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}
940
941void
942nouveau_gpuobj_resume(struct drm_device *dev)
943{
944 struct drm_nouveau_private *dev_priv = dev->dev_private;
945 struct nouveau_gpuobj *gpuobj;
946 int i;
947
948 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
949 if (!gpuobj->suspend)
950 continue;
951
952 for (i = 0; i < gpuobj->size; i += 4)
953 nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
954
955 vfree(gpuobj->suspend);
956 gpuobj->suspend = NULL;
957 }
958
959 dev_priv->engine.instmem.flush(dev);
960}
961
962int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
963 struct drm_file *file_priv)
964{
965 struct drm_nouveau_grobj_alloc *init = data;
966 struct nouveau_channel *chan;
967 int ret;
968
969 if (init->handle == ~0)
970 return -EINVAL;
971
972 chan = nouveau_channel_get(dev, file_priv, init->channel);
973 if (IS_ERR(chan))
974 return PTR_ERR(chan);
975
976 if (nouveau_ramht_find(chan, init->handle)) {
977 ret = -EEXIST;
978 goto out;
979 }
980
981 ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
982 if (ret) {
983 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
984 ret, init->channel, init->handle);
985 }
986
987out:
988 nouveau_channel_put(&chan);
989 return ret;
990}
991
992int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
993 struct drm_file *file_priv)
994{
995 struct drm_nouveau_gpuobj_free *objfree = data;
996 struct nouveau_channel *chan;
997 int ret;
998
999 chan = nouveau_channel_get(dev, file_priv, objfree->channel);
1000 if (IS_ERR(chan))
1001 return PTR_ERR(chan);
1002
1003
1004 nouveau_channel_idle(chan);
1005
1006 ret = nouveau_ramht_remove(chan, objfree->handle);
1007 nouveau_channel_put(&chan);
1008 return ret;
1009}
1010
/* Read a 32-bit word from a gpuobj at byte offset <offset>.
 *
 * Fast path: read through the object's CPU mapping (pinst).  When the
 * object is unmapped (pinst == ~0) or PRAMIN access isn't up yet,
 * slide the 64KiB PRAMIN window over the object's vinst (register
 * 0x1700 selects the 64KiB-aligned base) and read via the 0x700000
 * aperture.  ramin_lock serializes window moves; ramin_base caches the
 * current position to avoid redundant register writes. */
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32 val;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock(&dev_priv->ramin_lock);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}
1034
/* Write a 32-bit word to a gpuobj at byte offset <offset>.
 *
 * Mirror of nv_ro32(): writes go through the CPU mapping when one
 * exists, otherwise through the sliding 64KiB PRAMIN window at
 * 0x700000 (positioned by register 0x1700, serialized by ramin_lock). */
void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock(&dev_priv->ramin_lock);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}
1057