#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
#include "nv50_display.h"

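/* Software registry of object classes known to the driver.  Each class is
 * bound to an engine and may carry a list of software methods, which are
 * executed on the CPU instead of being handled by the hardware.
 */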
struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}

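/* As nouveau_gpuobj_mthd_call(), but looks the channel up by id under the
 * channel lock, so it is safe to call without holding a channel reference.
 */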
int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}

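/* Allocate instance memory ("PRAMIN") for a new GPU object and add it to
 * the global object list.  The object starts with one reference and is
 * released through nouveau_gpuobj_ref(NULL, &obj).
 */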
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

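	/* Channel-local objects are suballocated from the channel's own
	 * instance memory block; everything else gets fresh backing from
	 * the instmem engine.
	 */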
	if (!(flags & NVOBJ_FLAG_VM) && chan) {
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node = ramin;
	} else {
		ret = instmem->get(gpuobj, chan, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

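	/* NVOBJ_FLAG_ZERO_ALLOC: hand the object back fully zeroed. */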
	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	NV_DEBUG(dev, "\n");

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}

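/* kref release handler: clears the object if requested, releases its
 * backing storage and unlinks it from the global object list.
 */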
static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}

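/* Update a gpuobj pointer: take a reference on the new object (if any)
 * and drop the reference held on the old one.  Passing NULL as the new
 * object is how references are released.
 */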
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}

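/* Wrap an already-existing piece of instance memory (given by its PRAMIN
 * and VRAM addresses) in a gpuobj, without allocating anything.
 */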
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*pgpuobj = gpuobj;
	return 0;
}

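/* Write an NV50-style DMA object (ctxdma) describing [base, base + size)
 * into an existing gpuobj at the given offset.  The target selects which
 * address space the window lives in; GART addresses are translated to VM
 * addresses inside the aperture.
 */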
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0 = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->gart_info.aper_base;
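		/* fall through: once translated into the aperture, a GART
		 * address is handled like the default (VM) target below
		 */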
	default:
		flags0 &= ~0x00100000;
		break;
	}

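	/* convert to base + limit */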
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}

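/* Allocate a fresh 24-byte gpuobj and fill it with an NV50 DMA object. */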
int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
		    int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}

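/* Card-independent entry point for building DMA objects.  NV50 and up are
 * handed to nv50_gpuobj_dma_new(); older cards use the 16-byte DMA object
 * layout built below.
 */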
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

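	/* Resolve GART references: a page-table-backed GART can reuse the
	 * existing ctxdma (base 0) or be resolved to a physical PCI
	 * address; an aperture-based GART becomes a plain PCI reference.
	 */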
	if (target == NV_MEM_TARGET_GART) {
		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
			if (base == 0) {
				nouveau_gpuobj_ref(gart, pobj);
				return 0;
			}

			base = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			base += dev_priv->gart_info.aper_base;
			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
				target = NV_MEM_TARGET_PCI_NOSNOOP;
			else
				target = NV_MEM_TARGET_PCI;
		}
	}

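	/* Pre-NV50 DMA object: flags0 holds class, target, access and the
	 * low 12 bits of the base address; flags2 holds the page-aligned
	 * part of the base.
	 */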
	flags0 = class;
	flags0 |= 0x00003000;
	flags2 = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
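		/* fall through: NV_MEM_ACCESS_WO also takes the flags2 bit
		 * applied in the RW/default case
		 */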
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class = class;
	*pobj = obj;
	return 0;
}

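/* Software objects have no instance memory backing; they exist only so a
 * RAMHT entry can redirect the class's methods to the CPU.
 */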
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	kref_init(&gpuobj->refcount);
	gpuobj->cinst = 0x40;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
}

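/* Instantiate an object of the given class on a channel.  The class must
 * have been registered with nouveau_gpuobj_class_new(); the owning engine
 * gets a channel context created on first use.
 */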
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

		if (oc->id != class)
			continue;

		if (oc->engine == NVOBJ_ENGINE_SW)
			return nouveau_gpuobj_sw_new(chan, handle, class);

		if (!chan->engctx[oc->engine]) {
			ret = eng->context_new(chan, oc->engine);
			if (ret)
				return ret;
		}

		return eng->object_new(chan, oc->engine, handle, class);
	}

	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
	return -EINVAL;
}

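/* Allocate the channel's private instance memory block and set up a heap
 * to suballocate objects from it.
 */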
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

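	/* Base amount for object storage.  The NV50 layout reserves extra
	 * room ahead of the heap for fixed tables: 0x1400 bytes of mostly
	 * unknown state, 0x4000 for the page directory, plus space that
	 * appears to cover RAMHT (0x8000, matching the allocation in
	 * nouveau_gpuobj_channel_init()) and RAMFC-style state (0x1000).
	 */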
	size = 0x2000;
	base = 0;

	if (dev_priv->card_type == NV_50) {
		size += 0x1400;
		size += 0x4000;
		base = 0x6000;
		size += 0x8000;
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

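/* Fermi (NVC0) channel setup: a small instance block pointing at the vm's
 * page directory, plus mappings for the display semaphore buffers.
 */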
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *pgd = NULL;
	struct nouveau_vm_pgd *vpgd;
	int ret, i;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
	if (ret)
		return ret;

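	/* Allocate a page directory for the vm if it doesn't have one yet;
	 * it is destroyed automatically when the last reference to the vm
	 * is dropped.
	 */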
	if (list_empty(&vm->pgd_list)) {
		ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
		if (ret)
			return ret;
	}
	nouveau_vm_ref(vm, &chan->vm, pgd);
	nouveau_gpuobj_ref(NULL, &pgd);

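	/* Point the channel's instance block at the vm's page directory. */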
	vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0208, 0xffffffff);
	nv_wo32(chan->ramin, 0x020c, 0x000000ff);

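	/* Map the display semaphore buffers into the channel's vm. */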
	for (i = 0; i < 2; i++) {
		struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];

		ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
					 &chan->dispc_vma[i]);
		if (ret)
			return ret;
	}

	return 0;
}

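/* Set up everything a new channel needs for object handling: instance
 * memory, VM, RAMHT, and default VRAM/TT DMA objects under the given
 * handles.
 */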
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
	struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
	if (dev_priv->card_type == NV_C0)
		return nvc0_gpuobj_channel_init(chan, vm);

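	/* Allocate a chunk of memory for per-channel object storage. */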
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

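	/* NV50 VM: wrap the page directory inside the channel's instance
	 * block in a gpuobj and link the channel into the shared vm.
	 */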
	if (vm) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;

		nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	}

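	/* RAMHT: pre-NV50 cards share the global hash table, NV50 and up
	 * get a per-channel one.
	 */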
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;

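		/* DMA objects for the display sync channel semaphore
		 * blocks.
		 */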
		for (i = 0; i < 2; i++) {
			struct nouveau_gpuobj *sem = NULL;
			struct nv50_display_crtc *dispc =
				&nv50_display(dev)->crtc[i];
			u64 offset = dispc->sem.bo->bo.offset;

			ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
						     NV_MEM_ACCESS_RW,
						     NV_MEM_TARGET_VRAM, &sem);
			if (ret)
				return ret;

			ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
			nouveau_gpuobj_ref(NULL, &sem);
			if (ret)
				return ret;
		}
	}

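	/* VRAM ctxdma: NV50 and up reference VRAM through the vm instead
	 * of a raw linear window.
	 */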
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

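	/* TT (GART) memory ctxdma. */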
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

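/* Tear down everything nouveau_gpuobj_channel_init() created. */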
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (dev_priv->card_type >= NV_50) {
		struct nv50_display *disp = nv50_display(dev);

		for (i = 0; i < 2; i++) {
			struct nv50_display_crtc *dispc = &disp->crtc[i];
			nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
		}

		nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
		nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	}

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

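/* Instance memory contents may be lost across suspend; snapshot every
 * globally allocated gpuobj into system memory and restore it on resume.
 */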
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}

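/* Userspace interface: create an object of a given class on a channel. */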
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (init->handle == ~0)
		return -EINVAL;

	chan = nouveau_channel_get(file_priv, init->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (nouveau_ramht_find(chan, init->handle)) {
		ret = -EEXIST;
		goto out;
	}

	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
	}

out:
	nouveau_channel_put(&chan);
	return ret;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(file_priv, objfree->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

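	/* Synchronize with the channel before pulling objects out from
	 * under it.
	 */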
	nouveau_channel_idle(chan);

	ret = nouveau_ramht_remove(chan, objfree->handle);
	nouveau_channel_put(&chan);
	return ret;
}

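/* Read/write instance memory.  When the object has no direct PRAMIN
 * mapping (pinst == ~0, or PRAMIN is unavailable), access goes through a
 * sliding 64KiB window at 0x700000, with the window base held in register
 * 0x001700.
 */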
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32 val;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}