1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>

#include "ispqueue.h"
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
/*
 * ISP_CACHE_FLUSH_PAGES_MAX - Buffers larger than this number of pages are
 * synchronized with a full cache flush instead of a per-range operation.
 * Set to 0, so flush_cache_all() is currently always taken for cacheable
 * buffers.
 */
#define ISP_CACHE_FLUSH_PAGES_MAX 0

/*
 * isp_video_buffer_cache_sync - Make the CPU cache coherent with buffer
 * memory before handing the buffer to the device.
 * @buf: the video buffer
 *
 * Buffers mapped uncached or write-combine (skip_cache set at prepare time)
 * need no maintenance. Otherwise, clean/invalidate the user range when it is
 * small enough, or fall back to flushing the whole cache.
 */
static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
{
	if (buf->skip_cache)
		return;

	if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
	    buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
		flush_cache_all();
	else {
		/* ARM-specific: clean L1 for DMA, then invalidate the outer
		 * (L2) cache for the same virtual range. */
		dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
			      DMA_FROM_DEVICE);
		outer_inv_range(buf->vbuf.m.userptr,
				buf->vbuf.m.userptr + buf->vbuf.length);
	}
}
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
98{
99 struct vm_area_struct *vma;
100 unsigned long start;
101 unsigned long end;
102 int ret = 0;
103
104 if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
105 return 0;
106
107
108
109
110
111
112 if (!current || !current->mm)
113 return lock ? -EINVAL : 0;
114
115 start = buf->vbuf.m.userptr;
116 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
117
118 down_write(¤t->mm->mmap_sem);
119 spin_lock(¤t->mm->page_table_lock);
120
121 do {
122 vma = find_vma(current->mm, start);
123 if (vma == NULL) {
124 ret = -EFAULT;
125 goto out;
126 }
127
128 if (lock)
129 vma->vm_flags |= VM_LOCKED;
130 else
131 vma->vm_flags &= ~VM_LOCKED;
132
133 start = vma->vm_end + 1;
134 } while (vma->vm_end < end);
135
136 if (lock)
137 buf->vm_flags |= VM_LOCKED;
138 else
139 buf->vm_flags &= ~VM_LOCKED;
140
141out:
142 spin_unlock(¤t->mm->page_table_lock);
143 up_write(¤t->mm->mmap_sem);
144 return ret;
145}
146
147
148
149
150
151
152
153static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
154{
155 struct scatterlist *sglist;
156 unsigned int npages;
157 unsigned int i;
158 void *addr;
159
160 addr = buf->vaddr;
161 npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
162
163 sglist = vmalloc(npages * sizeof(*sglist));
164 if (sglist == NULL)
165 return -ENOMEM;
166
167 sg_init_table(sglist, npages);
168
169 for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
170 struct page *page = vmalloc_to_page(addr);
171
172 if (page == NULL || PageHighMem(page)) {
173 vfree(sglist);
174 return -EINVAL;
175 }
176
177 sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
178 }
179
180 buf->sglen = npages;
181 buf->sglist = sglist;
182
183 return 0;
184}
185
186
187
188
189
190
191static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
192{
193 struct scatterlist *sglist;
194 unsigned int offset = buf->offset;
195 unsigned int i;
196
197 sglist = vmalloc(buf->npages * sizeof(*sglist));
198 if (sglist == NULL)
199 return -ENOMEM;
200
201 sg_init_table(sglist, buf->npages);
202
203 for (i = 0; i < buf->npages; ++i) {
204 if (PageHighMem(buf->pages[i])) {
205 vfree(sglist);
206 return -EINVAL;
207 }
208
209 sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
210 offset);
211 offset = 0;
212 }
213
214 buf->sglen = buf->npages;
215 buf->sglist = sglist;
216
217 return 0;
218}
219
220
221
222
223
224
225
226static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
227{
228 struct scatterlist *sglist;
229 unsigned int offset = buf->offset;
230 unsigned long pfn = buf->paddr >> PAGE_SHIFT;
231 unsigned int i;
232
233 sglist = vmalloc(buf->npages * sizeof(*sglist));
234 if (sglist == NULL)
235 return -ENOMEM;
236
237 sg_init_table(sglist, buf->npages);
238
239 for (i = 0; i < buf->npages; ++i, ++pfn) {
240 sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
241 offset);
242
243
244
245 sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
246 offset = 0;
247 }
248
249 buf->sglen = buf->npages;
250 buf->sglist = sglist;
251
252 return 0;
253}
254
255
256
257
258
259
260
/*
 * isp_video_buffer_cleanup - Release all resources attached to a buffer
 * @buf: the video buffer
 *
 * Undo isp_video_buffer_prepare(): run the driver's cleanup hook, unmap the
 * DMA scatter list, free it, then unlock the VMAs and drop the page
 * references taken by get_user_pages(). Safe to call on a partially
 * prepared buffer (sglist may be NULL with sglen 0, pages may be NULL).
 */
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	enum dma_data_direction direction;
	unsigned int i;

	if (buf->queue->ops->buffer_cleanup)
		buf->queue->ops->buffer_cleanup(buf);

	/* PFNMAP buffers were never mapped through the DMA API (their DMA
	 * addresses are filled in by isp_video_buffer_sglist_pfnmap()). */
	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
			     direction);
	}

	vfree(buf->sglist);
	buf->sglist = NULL;
	buf->sglen = 0;

	/* USERPTR (non-PFNMAP) buffers: unlock the VMAs and release the
	 * pinned pages. */
	if (buf->pages != NULL) {
		isp_video_buffer_lock_vma(buf, 0);

		for (i = 0; i < buf->npages; ++i)
			page_cache_release(buf->pages[i]);

		vfree(buf->pages);
		buf->pages = NULL;
	}

	buf->npages = 0;
	buf->skip_cache = false;
}
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
318{
319 unsigned long data;
320 unsigned int first;
321 unsigned int last;
322 int ret;
323
324 data = buf->vbuf.m.userptr;
325 first = (data & PAGE_MASK) >> PAGE_SHIFT;
326 last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
327
328 buf->offset = data & ~PAGE_MASK;
329 buf->npages = last - first + 1;
330 buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
331 if (buf->pages == NULL)
332 return -ENOMEM;
333
334 down_read(¤t->mm->mmap_sem);
335 ret = get_user_pages(current, current->mm, data & PAGE_MASK,
336 buf->npages,
337 buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
338 buf->pages, NULL);
339 up_read(¤t->mm->mmap_sem);
340
341 if (ret != buf->npages) {
342 buf->npages = ret < 0 ? 0 : ret;
343 isp_video_buffer_cleanup(buf);
344 return -EFAULT;
345 }
346
347 ret = isp_video_buffer_lock_vma(buf, 1);
348 if (ret < 0)
349 isp_video_buffer_cleanup(buf);
350
351 return ret;
352}
353
354
355
356
357
358
359
360
361
362static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
363{
364 struct vm_area_struct *vma;
365 unsigned long prev_pfn;
366 unsigned long this_pfn;
367 unsigned long start;
368 unsigned long end;
369 dma_addr_t pa;
370 int ret = -EFAULT;
371
372 start = buf->vbuf.m.userptr;
373 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
374
375 buf->offset = start & ~PAGE_MASK;
376 buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
377 buf->pages = NULL;
378
379 down_read(¤t->mm->mmap_sem);
380 vma = find_vma(current->mm, start);
381 if (vma == NULL || vma->vm_end < end)
382 goto done;
383
384 for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
385 ret = follow_pfn(vma, start, &this_pfn);
386 if (ret)
387 goto done;
388
389 if (prev_pfn == 0)
390 pa = this_pfn << PAGE_SHIFT;
391 else if (this_pfn != prev_pfn + 1) {
392 ret = -EFAULT;
393 goto done;
394 }
395
396 prev_pfn = this_pfn;
397 }
398
399 buf->paddr = pa + buf->offset;
400 ret = 0;
401
402done:
403 up_read(¤t->mm->mmap_sem);
404 return ret;
405}
406
407
408
409
410
411
412
413
414
415
416
417
418
419static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
420{
421 struct vm_area_struct *vma;
422 pgprot_t vm_page_prot;
423 unsigned long start;
424 unsigned long end;
425 int ret = -EFAULT;
426
427 start = buf->vbuf.m.userptr;
428 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
429
430 down_read(¤t->mm->mmap_sem);
431
432 do {
433 vma = find_vma(current->mm, start);
434 if (vma == NULL)
435 goto done;
436
437 if (start == buf->vbuf.m.userptr) {
438 buf->vm_flags = vma->vm_flags;
439 vm_page_prot = vma->vm_page_prot;
440 }
441
442 if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
443 goto done;
444
445 if (vm_page_prot != vma->vm_page_prot)
446 goto done;
447
448 start = vma->vm_end + 1;
449 } while (vma->vm_end < end);
450
451
452
453
454 if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
455 vm_page_prot == pgprot_writecombine(vm_page_prot))
456 buf->skip_cache = true;
457
458 ret = 0;
459
460done:
461 up_read(¤t->mm->mmap_sem);
462 return ret;
463}
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
/*
 * isp_video_buffer_prepare - Make a buffer ready for hardware access
 * @buf: the video buffer
 *
 * Build the scatter list appropriate for the memory type (kernel vmalloc,
 * pinned user pages, or contiguous PFNMAP), map it for DMA when needed, and
 * give the driver a chance to do its own preparation. Any failure after the
 * memory-type-specific step releases everything via
 * isp_video_buffer_cleanup().
 *
 * Returns 0 on success or a negative error code.
 */
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	enum dma_data_direction direction;
	int ret;

	switch (buf->vbuf.memory) {
	case V4L2_MEMORY_MMAP:
		ret = isp_video_buffer_sglist_kernel(buf);
		break;

	case V4L2_MEMORY_USERPTR:
		/* Classify the mapping first: PFNMAP and regular user memory
		 * are pinned and described differently. */
		ret = isp_video_buffer_prepare_vm_flags(buf);
		if (ret < 0)
			return ret;

		if (buf->vm_flags & VM_PFNMAP) {
			ret = isp_video_buffer_prepare_pfnmap(buf);
			if (ret < 0)
				return ret;

			ret = isp_video_buffer_sglist_pfnmap(buf);
		} else {
			ret = isp_video_buffer_prepare_user(buf);
			if (ret < 0)
				return ret;

			ret = isp_video_buffer_sglist_user(buf);
		}
		break;

	default:
		return -EINVAL;
	}

	if (ret < 0)
		goto done;

	/* PFNMAP scatter lists carry precomputed DMA addresses and are not
	 * mapped through the DMA API. */
	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
				 direction);
		if (ret != buf->sglen) {
			ret = -EFAULT;
			goto done;
		}
	}

	if (buf->queue->ops->buffer_prepare)
		ret = buf->queue->ops->buffer_prepare(buf);

done:
	if (ret < 0) {
		isp_video_buffer_cleanup(buf);
		return ret;
	}

	return ret;
}
539
540
541
542
543
544
/*
 * isp_video_buffer_query - Fill a v4l2_buffer with the buffer's current state
 * @buf: the video buffer
 * @vbuf: the v4l2_buffer to fill for userspace
 *
 * The switch cascades deliberately: an ERROR buffer is also DONE, and ERROR,
 * DONE, QUEUED and ACTIVE buffers all report the QUEUED flag (the dqbuf path
 * clears QUEUED itself after calling this).
 */
static void isp_video_buffer_query(struct isp_video_buffer *buf,
				   struct v4l2_buffer *vbuf)
{
	memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));

	if (buf->vma_use_count)
		vbuf->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (buf->state) {
	case ISP_BUF_STATE_ERROR:
		vbuf->flags |= V4L2_BUF_FLAG_ERROR;
		/* fallthrough */
	case ISP_BUF_STATE_DONE:
		vbuf->flags |= V4L2_BUF_FLAG_DONE;
		/* fallthrough */
	case ISP_BUF_STATE_QUEUED:
	case ISP_BUF_STATE_ACTIVE:
		vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case ISP_BUF_STATE_IDLE:
	default:
		break;
	}
}
567
568
569
570
571
572
573
574
575
576
577static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
578{
579 if (nonblocking) {
580 return (buf->state != ISP_BUF_STATE_QUEUED &&
581 buf->state != ISP_BUF_STATE_ACTIVE)
582 ? 0 : -EAGAIN;
583 }
584
585 return wait_event_interruptible(buf->wait,
586 buf->state != ISP_BUF_STATE_QUEUED &&
587 buf->state != ISP_BUF_STATE_ACTIVE);
588}
589
590
591
592
593
594
595
596
597
598
599
600
601
602static int isp_video_queue_free(struct isp_video_queue *queue)
603{
604 unsigned int i;
605
606 if (queue->streaming)
607 return -EBUSY;
608
609 for (i = 0; i < queue->count; ++i) {
610 if (queue->buffers[i]->vma_use_count != 0)
611 return -EBUSY;
612 }
613
614 for (i = 0; i < queue->count; ++i) {
615 struct isp_video_buffer *buf = queue->buffers[i];
616
617 isp_video_buffer_cleanup(buf);
618
619 vfree(buf->vaddr);
620 buf->vaddr = NULL;
621
622 kfree(buf);
623 queue->buffers[i] = NULL;
624 }
625
626 INIT_LIST_HEAD(&queue->queue);
627 queue->count = 0;
628 return 0;
629}
630
631
632
633
634
635
/*
 * isp_video_queue_alloc - Allocate buffers for the queue
 * @queue: the video queue
 * @nbuffers: requested number of buffers
 * @size: size of each buffer's payload in bytes
 * @memory: V4L2 memory type (MMAP buffers get backing storage here)
 *
 * Frees any existing buffers first. Allocation stops at the first failure,
 * so fewer than @nbuffers may be created; the actual count is returned
 * (possibly 0 when @nbuffers is 0), or a negative error code.
 */
static int isp_video_queue_alloc(struct isp_video_queue *queue,
				 unsigned int nbuffers,
				 unsigned int size, enum v4l2_memory memory)
{
	struct isp_video_buffer *buf;
	unsigned int i;
	void *mem;
	int ret;

	/* Start from a clean slate; fails with -EBUSY if buffers are in use. */
	ret = isp_video_queue_free(queue);
	if (ret < 0)
		return ret;

	/* A zero request is just a free. */
	if (nbuffers == 0)
		return 0;

	for (i = 0; i < nbuffers; ++i) {
		/* bufsize is the driver's buffer structure size, which embeds
		 * struct isp_video_buffer. */
		buf = kzalloc(queue->bufsize, GFP_KERNEL);
		if (buf == NULL)
			break;

		if (memory == V4L2_MEMORY_MMAP) {
			/* Userspace-mappable, 32-bit DMA-able backing memory. */
			mem = vmalloc_32_user(PAGE_ALIGN(size));
			if (mem == NULL) {
				kfree(buf);
				break;
			}

			/* The mmap offset identifies the buffer (see
			 * omap3isp_video_queue_mmap()). */
			buf->vbuf.m.offset = i * PAGE_ALIGN(size);
			buf->vaddr = mem;
		}

		buf->vbuf.index = i;
		buf->vbuf.length = size;
		buf->vbuf.type = queue->type;
		buf->vbuf.field = V4L2_FIELD_NONE;
		buf->vbuf.memory = memory;

		buf->queue = queue;
		init_waitqueue_head(&buf->wait);

		queue->buffers[i] = buf;
	}

	if (i == 0)
		return -ENOMEM;

	queue->count = i;
	return nbuffers;
}
692
693
694
695
696
697
698
699
700
701
702
703
/*
 * omap3isp_video_queue_cleanup - Free all queue resources
 * @queue: the video queue
 *
 * Returns 0 on success, or -EBUSY if the queue is streaming or a buffer is
 * still mmapped.
 */
int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
{
	return isp_video_queue_free(queue);
}
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730int omap3isp_video_queue_init(struct isp_video_queue *queue,
731 enum v4l2_buf_type type,
732 const struct isp_video_queue_operations *ops,
733 struct device *dev, unsigned int bufsize)
734{
735 INIT_LIST_HEAD(&queue->queue);
736 mutex_init(&queue->lock);
737 spin_lock_init(&queue->irqlock);
738
739 queue->type = type;
740 queue->ops = ops;
741 queue->dev = dev;
742 queue->bufsize = bufsize;
743
744 return 0;
745}
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
/*
 * omap3isp_video_queue_reqbufs - Implement VIDIOC_REQBUFS
 * @queue: the video queue
 * @rb: the userspace request; rb->count is updated to the number actually
 *      allocated
 *
 * The driver's queue_prepare callback may lower the buffer count and must
 * provide the buffer size. Returns 0 on success or a negative error code.
 */
int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
				 struct v4l2_requestbuffers *rb)
{
	unsigned int nbuffers = rb->count;
	unsigned int size;
	int ret;

	if (rb->type != queue->type)
		return -EINVAL;

	queue->ops->queue_prepare(queue, &nbuffers, &size);
	if (size == 0)
		return -EINVAL;

	nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);

	mutex_lock(&queue->lock);

	ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
	if (ret < 0)
		goto done;

	/* Report how many buffers were really allocated. */
	rb->count = ret;
	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
801
802
803
804
805
806
807
808
809
810int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
811 struct v4l2_buffer *vbuf)
812{
813 struct isp_video_buffer *buf;
814 int ret = 0;
815
816 if (vbuf->type != queue->type)
817 return -EINVAL;
818
819 mutex_lock(&queue->lock);
820
821 if (vbuf->index >= queue->count) {
822 ret = -EINVAL;
823 goto done;
824 }
825
826 buf = queue->buffers[vbuf->index];
827 isp_video_buffer_query(buf, vbuf);
828
829done:
830 mutex_unlock(&queue->lock);
831 return ret;
832}
833
834
835
836
837
838
839
840
841
842
843
844
845
846
/*
 * omap3isp_video_queue_qbuf - Implement VIDIOC_QBUF
 * @queue: the video queue
 * @vbuf: the buffer to queue, identified by vbuf->index
 *
 * Validate the buffer, (re)prepare it if needed, synchronize CPU caches and
 * append it to the queue. If streaming is already on, hand it to the driver
 * immediately under the irqlock.
 *
 * Returns 0 on success or -EINVAL for type/index/memory/state mismatches or
 * a failed preparation.
 */
int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
			      struct v4l2_buffer *vbuf)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	int ret = -EINVAL;

	if (vbuf->type != queue->type)
		goto done;

	mutex_lock(&queue->lock);

	if (vbuf->index >= queue->count)
		goto done;

	buf = queue->buffers[vbuf->index];

	if (vbuf->memory != buf->vbuf.memory)
		goto done;

	/* Only idle buffers can be queued. */
	if (buf->state != ISP_BUF_STATE_IDLE)
		goto done;

	/* A new userspace pointer invalidates the cached preparation. */
	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
	    vbuf->m.userptr != buf->vbuf.m.userptr) {
		isp_video_buffer_cleanup(buf);
		buf->vbuf.m.userptr = vbuf->m.userptr;
		buf->prepared = 0;
	}

	/* Preparation (pinning, scatter list, DMA map) is done once and
	 * reused across requeues. */
	if (!buf->prepared) {
		ret = isp_video_buffer_prepare(buf);
		if (ret < 0)
			goto done;
		buf->prepared = 1;
	}

	isp_video_buffer_cache_sync(buf);

	buf->state = ISP_BUF_STATE_QUEUED;
	list_add_tail(&buf->stream, &queue->queue);

	if (queue->streaming) {
		spin_lock_irqsave(&queue->irqlock, flags);
		queue->ops->buffer_queue(buf);
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
901
902
903
904
905
906
907
908
909
910
911
912
913
914
/*
 * omap3isp_video_queue_dqbuf - Implement VIDIOC_DQBUF
 * @queue: the video queue
 * @vbuf: filled with the dequeued buffer's information
 * @nonblocking: non-zero for O_NONBLOCK semantics
 *
 * Wait (unless @nonblocking) for the oldest queued buffer to complete, then
 * remove it from the queue and return it to the idle state.
 *
 * NOTE(review): the wait runs with queue->lock held, so concurrent
 * qbuf/querybuf calls block until this buffer completes — confirm this is
 * the intended serialization.
 *
 * Returns 0 on success, -EINVAL on type mismatch or empty queue, -EAGAIN in
 * non-blocking mode, or the error from an interrupted wait.
 */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
			       struct v4l2_buffer *vbuf, int nonblocking)
{
	struct isp_video_buffer *buf;
	int ret;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (list_empty(&queue->queue)) {
		ret = -EINVAL;
		goto done;
	}

	/* Buffers complete in FIFO order; take the oldest. */
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
	ret = isp_video_buffer_wait(buf, nonblocking);
	if (ret < 0)
		goto done;

	list_del(&buf->stream);

	isp_video_buffer_query(buf, vbuf);
	buf->state = ISP_BUF_STATE_IDLE;
	/* query reports QUEUED for done buffers; a dequeued one isn't. */
	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
946
947
948
949
950
951
952
953
954
955
956int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
957{
958 struct isp_video_buffer *buf;
959 unsigned long flags;
960
961 mutex_lock(&queue->lock);
962
963 if (queue->streaming)
964 goto done;
965
966 queue->streaming = 1;
967
968 spin_lock_irqsave(&queue->irqlock, flags);
969 list_for_each_entry(buf, &queue->queue, stream)
970 queue->ops->buffer_queue(buf);
971 spin_unlock_irqrestore(&queue->irqlock, flags);
972
973done:
974 mutex_unlock(&queue->lock);
975 return 0;
976}
977
978
979
980
981
982
983
984
985
986
987
/*
 * omap3isp_video_queue_streamoff - Implement VIDIOC_STREAMOFF
 * @queue: the video queue
 *
 * Stop streaming, wake any waiter on an active buffer, force every buffer
 * back to the idle state and empty the queue. A no-op when not streaming.
 */
void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	queue->streaming = 0;

	/* The irqlock protects buffer state against the driver's completion
	 * handler. */
	spin_lock_irqsave(&queue->irqlock, flags);
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		/* Wake dqbuf waiters stuck on a buffer that will never
		 * complete now. */
		if (buf->state == ISP_BUF_STATE_ACTIVE)
			wake_up(&buf->wait);

		buf->state = ISP_BUF_STATE_IDLE;
	}
	spin_unlock_irqrestore(&queue->irqlock, flags);

	INIT_LIST_HEAD(&queue->queue);

done:
	mutex_unlock(&queue->lock);
}
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
1030{
1031 struct isp_video_buffer *buf;
1032 unsigned int i;
1033
1034 mutex_lock(&queue->lock);
1035
1036 if (!queue->streaming)
1037 goto done;
1038
1039 for (i = 0; i < queue->count; ++i) {
1040 buf = queue->buffers[i];
1041
1042 if (buf->state == ISP_BUF_STATE_DONE)
1043 buf->state = ISP_BUF_STATE_ERROR;
1044 }
1045
1046done:
1047 mutex_unlock(&queue->lock);
1048}
1049
/* VMA open: count a new userspace mapping of the buffer. The count keeps
 * isp_video_queue_free() from releasing mapped memory. */
static void isp_video_queue_vm_open(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count++;
}

/* VMA close: drop the mapping count taken in isp_video_queue_vm_open(). */
static void isp_video_queue_vm_close(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count--;
}

/* VM operations installed on buffer mappings by omap3isp_video_queue_mmap(). */
static const struct vm_operations_struct isp_video_queue_vm_ops = {
	.open = isp_video_queue_vm_open,
	.close = isp_video_queue_vm_close,
};
1068
1069
1070
1071
1072
1073
1074
1075
1076
/*
 * omap3isp_video_queue_mmap - Map an MMAP buffer into userspace
 * @queue: the video queue
 * @vma: the VMA describing the requested mapping
 *
 * The buffer is selected by matching vma->vm_pgoff against the per-buffer
 * mmap offset assigned at allocation time. The mapping must cover exactly
 * the page-aligned buffer length.
 *
 * Returns 0 on success or a negative error code.
 */
int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
			      struct vm_area_struct *vma)
{
	struct isp_video_buffer *uninitialized_var(buf);
	unsigned long size;
	unsigned int i;
	int ret = 0;

	mutex_lock(&queue->lock);

	/* Locate the buffer whose mmap offset matches the request. */
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];
		if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}

	if (i == queue->count) {
		ret = -EINVAL;
		goto done;
	}

	size = vma->vm_end - vma->vm_start;

	/* Only kernel-allocated buffers can be mapped, and only in full. */
	if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
	    size != PAGE_ALIGN(buf->vbuf.length)) {
		ret = -EINVAL;
		goto done;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret < 0)
		goto done;

	/* Track the mapping; vm_open/vm_close keep vma_use_count accurate
	 * across forks and unmaps. */
	vma->vm_ops = &isp_video_queue_vm_ops;
	vma->vm_private_data = buf;
	isp_video_queue_vm_open(vma);

done:
	mutex_unlock(&queue->lock);
	return ret;
}
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
/*
 * omap3isp_video_queue_poll - Poll for buffer completion
 * @queue: the video queue
 * @file: the file being polled
 * @wait: the poll table
 *
 * Register on the wait queue of the oldest queued buffer (the one dqbuf
 * would return) and report readability/writability once it has completed or
 * errored. An empty queue reports POLLERR.
 */
unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
				       struct file *file, poll_table *wait)
{
	struct isp_video_buffer *buf;
	unsigned int mask = 0;

	mutex_lock(&queue->lock);
	if (list_empty(&queue->queue)) {
		/* Nothing queued: poll can never succeed. */
		mask |= POLLERR;
		goto done;
	}
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);

	poll_wait(file, &buf->wait, wait);
	if (buf->state == ISP_BUF_STATE_DONE ||
	    buf->state == ISP_BUF_STATE_ERROR) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			mask |= POLLIN | POLLRDNORM;
		else
			mask |= POLLOUT | POLLWRNORM;
	}

done:
	mutex_unlock(&queue->lock);
	return mask;
}
1154