/*
 * omap_gem.c -- GEM buffer object implementation for the OMAP DRM driver
 *
 * Handles allocation and mapping of GEM buffer objects: shmem backed,
 * physically contiguous, and 2d tiled (DMM/TILER) buffers.
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/* remove these once drm core exports its own get/put_pages helpers */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If the buffer is allocated physically contiguous, the OMAP_BO_DMA
	 * flag is set and paddr is valid.  Also if the buffer is remapped
	 * in TILER and paddr_cnt > 0, then paddr is valid.  But if you are
	 * using the physical address and OMAP_BO_DMA is not set, then you
	 * should be going thru omap_gem_{get,put}_paddr() to ensure the
	 * mapping is not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but doesn't mean that it is; use
	 * the OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/** # of users of paddr */
	uint32_t paddr_cnt;

	/** tiler block used when buffer is remapped in DMM/TILER */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/** virtual address, if mapped into kernel space (omap_gem_vaddr()) */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed).
	 *
	 * Tracks pending and completed read/write operations on the buffer;
	 * see the "Buffer Synchronization" section below.  May also be an
	 * externally allocated object (OMAP_BO_EXT_SYNC), in which case it
	 * is not freed together with the object.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) are not linear
 * in sparse memory (due to the tiler stride), we pin a small number of
 * scratch tiler blocks per format ("usergart" entries) and fault windows
 * of the buffer through them, rather than pinning whole buffers.
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;			/* slot height in rows */
	int height_shift;		/* ilog2(height) */
	int slot_shift;			/* ilog2(slot width in pixels) */
	int stride_pfn;			/* physical stride, in pages */
	int last;			/* index of last used entry */
} *usergart;

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	if (obj->dev->dev_mapping) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		int n = usergart[fmt].height;
		size_t size = PAGE_SIZE * n;
		loff_t off = mmap_offset(obj) +
				(entry->obj_pgoff << PAGE_SHIFT);
		const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

		if (m > 1) {
			int i;
			/* if stride > PAGE_SIZE the mapping is sparse, so
			 * unmap the window one row at a time:
			 */
			for (i = n; i > 0; i--) {
				unmap_mapping_range(obj->dev->dev_mapping,
						off, PAGE_SIZE, 1);
				off += PAGE_SIZE * m;
			}
		} else {
			unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
		}
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* GEM objects can either be allocated from contiguous memory (in which case
 * obj->filp == NULL), or w/ shmem backing (obj->filp != NULL).  But non
 * shmem backed allocation can be either physically contiguous or not.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/* shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
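
/* sync_lock protects the per-object ->sync state and the global waiters
 * list used by the buffer synchronization code toward the end of this file.
 */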
static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n",
				PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}
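
/* Note: for OMAP_BO_CACHED buffers the addrs[] array starts out zeroed;
 * entries are filled by omap_gem_dma_sync() and cleared again by
 * omap_gem_cpu_sync(), which is the basis of the fault-driven coherency
 * tracking for cached mappings.
 */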

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, undo the mappings set up in attach_pages: */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}
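
/* The 2d tiled case is more involved: the buffer is not linear from the
 * CPU's point of view, and rows wider than 4kb have their virtual stride
 * rounded up to the next page multiple.  So instead of inserting the
 * backing pages directly, we pin a window of the buffer into one of the
 * pre-reserved usergart tiler blocks and map that block to userspace,
 * evicting whatever buffer was previously faulted through the same entry.
 */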

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/* usergart blocks are one page wide, so the slot height (in rows)
	 * is also the number of backing pages needed to fill the mapped
	 * window:
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/* virtual stride of a row, in pages.. for buffers wider than
	 * PAGE_SIZE each row spans m pages of virtual address space:
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/* Actual address we start mapping at is rounded down to previous
	 * slot boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than-4k buffers, figure out which part of the slot-row
	 * we want:
	 */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/* Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin through the entries: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault		-	pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER.  But these are
		 * allocated write-combined, so we don't get here.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/**
 * omap_gem_dumb_create	-	create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset	-	create an offset for a dumb buffer
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: resulting fake mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if the object is currently pinned in TILER, re-pin it with the
	 * new roll applied:
	 */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}
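
/* Counterpart to omap_gem_cpu_sync(): map any pages that were handed back
 * to the CPU (addrs[i] == 0) to the device again, and shoot down the
 * userspace mappings if anything changed, so the next CPU access faults
 * and re-syncs.
 */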

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev,
						pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %08x", omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* acquire pages when needed (for example, for DMA where physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* If !remap and the object doesn't have pages attached, fail instead of
 * attaching (and thereby pinning) them: attaching requires struct_mutex,
 * and the !remap variant may be called from atomic context.  The name
 * 'remap' is kept for symmetry with omap_gem_get_paddr().
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %08zx (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/*
 * Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved, which can call a user
 * callback (ex. to kick 3d and anything else that might be waiting).
 */
static LIST_HEAD(waiters);
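
/* a waiter is "waiting" until the complete-counts catch up with the
 * pending-counts that were sampled when the waiter was created:
 */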
static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

/* must be called w/ sync_lock held */
static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	/* allocate the sync object on first use: */
	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of external sync objects (ex. updated directly by the GPU)
 * we don't necessarily get told which counters changed..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
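
/* omap_gem_op_sync() blocks until the operations indicated by 'op' that
 * were pending when it was called have completed; the wakeup is driven by
 * sync_op_update() through the sync_notify() callback.
 */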
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		/* not blocked, so the waiter is not needed after all: */
		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

int omap_gem_init_object(struct drm_gem_object *obj)
{
	return -EINVAL;          /* unused */
}

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	list_del(&omap_obj->mm_list);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
		flags |= OMAP_BO_WC;

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		goto fail;

	list_add(&omap_obj->mm_list, &priv->obj_list);

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr)
			flags |= OMAP_BO_DMA;
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	ret = 0;
	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
		drm_gem_private_object_init(dev, obj, size);
	else
		ret = drm_gem_object_init(dev, obj, size);

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}

/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(ARRAY_SIZE(fmts), sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
					entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* this should be safe to call at this point, as there should be
	 * no more outstanding GEM objects which could depend on usergart
	 * or DMM:
	 */
	kfree(usergart);
}