/*
 * omap_gem.c -- GEM buffer object implementation for the OMAP DRM driver.
 *
 * Handles shmem- and DMA-backed buffer objects, remapping through the
 * DMM/TILER for contiguous or 2d-tiled access, userspace mmap (including
 * faulting of tiled buffers through a small "usergart" of tiler slots),
 * and buffer read/write synchronization.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/* GEM page and mmap-offset helpers (not yet exported by the DRM core): */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* driver-internal flags, kept in the upper bits of the userspace flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The counters are protected by
	 * sync_lock, and the object may be replaced with an externally
	 * allocated one via omap_gem_set_sync_object() (in which case
	 * OMAP_BO_EXT_SYNC is set).
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

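/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) are not linear
 * in their DMM mapping, we reserve a small per-format "usergart" of
 * pre-reserved tiler slots.  A faulting page of a 2d buffer is pinned
 * into the least-recently-used slot, and the slot's previous occupant
 * (if any) is evicted by unmapping it from userspace, so a later access
 * faults it back in.
 */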
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;

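/* Unmap a usergart slot's pages from any userspace mmap'ing of the object
 * that currently occupies it, so the next CPU access faults and re-pins.
 */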
static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	if (obj->dev->dev_mapping) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		int n = usergart[fmt].height;
		size_t size = PAGE_SIZE * n;
		loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
		const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
		if (m > 1) {
			int i;
			/* if stride > PAGE_SIZE, the rows are sparse in the
			 * virtual mapping, so unmap them one at a time:
			 */
			for (i = n; i > 0; i--) {
				unmap_mapping_range(obj->dev->dev_mapping,
						off, PAGE_SIZE, 1);
				off += PAGE_SIZE * m;
			}
		} else {
			unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
		}
	}

	entry->obj = NULL;
}

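/* Evict a buffer from all usergart slots that it currently occupies: */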
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

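/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp == NULL), or with shmem backing (obj->filp != NULL).
 * Shmem-backed buffers can still be remapped in TILER/DMM when a
 * contiguous physical address is needed.
 */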
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

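/* Shmem buffers that are mapped cached can simulate coherency by tracking
 * the clean/dirty state of each page via its dma mapping in addrs[]; see
 * omap_gem_cpu_sync() and omap_gem_dma_sync().
 */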
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* protects the sync counters and the global waiters list, below */
static DEFINE_SPINLOCK(sync_lock);

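/** ensure backing pages are allocated */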
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = _drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	_drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

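/** release backing pages */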
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, the pages were mapped for device access,
	 * so unmap them again before releasing:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	_drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

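/** get mmap offset, creating it if it doesn't exist yet */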
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->map_list.map) {
		/* Make it mmapable */
		size_t size = omap_gem_mmap_size(obj);
		int ret = _drm_gem_create_mmap_offset_size(obj, size);

		if (ret) {
			dev_err(dev->dev, "could not allocate mmap offset\n");
			return 0;
		}
	}

	return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

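/** get mmap size */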
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

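/* get tiled size, returns -EINVAL if not tiled buffer */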
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

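/* Normal handling for the case of faulting in non-tiled buffers */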
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

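/* Special handling for the case of faulting in 2d tiled buffers */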
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the
	 * slot height is 64, then the mapping is 64 pages worth.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE, so the rows are
	 * sparse in the virtual mapping; 'm' is the number of virtual
	 * pages per row.
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous
	 * slot boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin through the usergart entries: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

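/**
 * omap_gem_fault		-	pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */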
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction..  we need to do something more like a lock-table.
	 */
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

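/** We override the shmem vm_ops with our own, so we can control mmap behavior */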
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER.  But these are
		 * allocated write-combine, so should never end up here.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

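/**
 * omap_gem_dumb_create	-	create a dumb buffer
 * @drm_file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */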
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

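/**
 * omap_gem_dumb_destroy	-	destroy a dumb buffer
 * @file: client file
 * @dev: our DRM device
 * @handle: the object handle
 *
 * Destroy a handle that was created via omap_gem_dumb_create.
 */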
int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}

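/**
 * omap_gem_dumb_map_offset	-	buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.
 */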
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

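/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */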
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

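/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */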
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

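/* sync the buffer for DMA access */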
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		/* re-map any pages that were unmapped for CPU access: */
		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		/* if any were dirty, zap userspace mappings so the next CPU
		 * access faults and gets synced again:
		 */
		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

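/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */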
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %08x", omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

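/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */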
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

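/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */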
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

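/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */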
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

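/* acquire pages when needed (for example, for DMA where physically
 * contiguous buffer is not required)
 */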
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

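/* Get pages, attaching them first if necessary.  If remap=false then
 * fail (rather than attaching) when the pages are not already attached;
 * this variant doesn't take struct_mutex, so it can be used from
 * contexts that know the pages are already pinned.
 */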
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

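/* release pages when DMA no longer being performed */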
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

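/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held..
 */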
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}

#ifdef CONFIG_PM

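/* re-pin objects in DMM in resume path: */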
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off = 0;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (obj->map_list.map)
		off = (uint64_t)obj->map_list.hash.key;

	seq_printf(m, "%08x: %2d (%2d) %08llx %08llx (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, (unsigned long long)omap_obj->paddr,
			omap_obj->paddr_cnt, omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
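/* Buffer Synchronization:
 * track pending and completed hw/dma read and write operations per
 * buffer, so that CPU access can be blocked (or a callback deferred)
 * until the buffer is no longer busy.
 */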
struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

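/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved, which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */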
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

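/* run any waiters whose read/write targets have now been reached..
 * called with sync_lock held
 */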
static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

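/* mark the beginning (start=true) or end of a read and/or write
 * operation, allocating the per-buffer sync object on first use
 */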
static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

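/* it is a bit lame to handle updates in this sort of polling way, but
 * it keeps things simple.. code that completes hw/dma operations calls
 * this to kick any pending waiters:
 */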
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

/* mark the end of read and/or write operation */
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

/* wakes a task blocked in omap_gem_op_sync(): */
static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

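/* block (interruptibly) until the specified read and/or write ops on
 * the buffer have completed
 */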
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);

		if (waiter)
			kfree(waiter);
	}
	return ret;
}

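/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 */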
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		/* op already complete, so don't leak the unused waiter: */
		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

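/* special API so the SGX driver can replace the buffer's sync-object with
 * one allocated from its own sync-object heap.  The current counter values
 * are carried over into (or back out of) the new sync-object.
 */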
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set external syncobj: fall back to
		 * an internal copy of the current counter values
		 */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an internally allocated syncobj with an external
		 * one, carrying over the current counter values
		 */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* objects are always created via omap_gem_new(), never
 * drm_gem_object_alloc(), so this should never get called:
 */
int omap_gem_init_object(struct drm_gem_object *obj)
{
	return -EINVAL;
}

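/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */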
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	list_del(&omap_obj->mm_list);

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

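/* convenience method to construct a GEM buffer object, and userspace handle */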
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

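/* GEM buffer object constructor */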
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
		flags |= OMAP_BO_WC;

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		goto fail;

	list_add(&omap_obj->mm_list, &priv->obj_list);

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr)
			flags |= OMAP_BO_DMA;
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
		ret = drm_gem_private_object_init(dev, obj, size);
	else
		ret = drm_gem_object_init(dev, obj, size);

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}

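/* init/cleanup.. if DMM is used, we need to set some stuff up.. */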
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
					entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}