1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#include <asm/cacheflush.h>
27#include <linux/dma-mapping.h>
28#include <linux/mm.h>
29#include <linux/pagemap.h>
30#include <linux/poll.h>
31#include <linux/scatterlist.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/vmalloc.h>
35
36#include "ispqueue.h"
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65#define ISP_CACHE_FLUSH_PAGES_MAX 0
66
67static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
68{
69 if (buf->skip_cache)
70 return;
71
72 if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
73 buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
74 flush_cache_all();
75 else {
76 dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
77 DMA_FROM_DEVICE);
78 outer_inv_range(buf->vbuf.m.userptr,
79 buf->vbuf.m.userptr + buf->vbuf.length);
80 }
81}
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
98{
99 struct vm_area_struct *vma;
100 unsigned long start;
101 unsigned long end;
102 int ret = 0;
103
104 if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
105 return 0;
106
107
108
109
110
111
112 if (!current || !current->mm)
113 return lock ? -EINVAL : 0;
114
115 start = buf->vbuf.m.userptr;
116 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
117
118 down_write(¤t->mm->mmap_sem);
119 spin_lock(¤t->mm->page_table_lock);
120
121 do {
122 vma = find_vma(current->mm, start);
123 if (vma == NULL) {
124 ret = -EFAULT;
125 goto out;
126 }
127
128 if (lock)
129 vma->vm_flags |= VM_LOCKED;
130 else
131 vma->vm_flags &= ~VM_LOCKED;
132
133 start = vma->vm_end + 1;
134 } while (vma->vm_end < end);
135
136 if (lock)
137 buf->vm_flags |= VM_LOCKED;
138 else
139 buf->vm_flags &= ~VM_LOCKED;
140
141out:
142 spin_unlock(¤t->mm->page_table_lock);
143 up_write(¤t->mm->mmap_sem);
144 return ret;
145}
146
147
148
149
150
151
152
153static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
154{
155 struct scatterlist *sglist;
156 unsigned int npages;
157 unsigned int i;
158 void *addr;
159
160 addr = buf->vaddr;
161 npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
162
163 sglist = vmalloc(npages * sizeof(*sglist));
164 if (sglist == NULL)
165 return -ENOMEM;
166
167 sg_init_table(sglist, npages);
168
169 for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
170 struct page *page = vmalloc_to_page(addr);
171
172 if (page == NULL || PageHighMem(page)) {
173 vfree(sglist);
174 return -EINVAL;
175 }
176
177 sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
178 }
179
180 buf->sglen = npages;
181 buf->sglist = sglist;
182
183 return 0;
184}
185
186
187
188
189
190
191static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
192{
193 struct scatterlist *sglist;
194 unsigned int offset = buf->offset;
195 unsigned int i;
196
197 sglist = vmalloc(buf->npages * sizeof(*sglist));
198 if (sglist == NULL)
199 return -ENOMEM;
200
201 sg_init_table(sglist, buf->npages);
202
203 for (i = 0; i < buf->npages; ++i) {
204 if (PageHighMem(buf->pages[i])) {
205 vfree(sglist);
206 return -EINVAL;
207 }
208
209 sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
210 offset);
211 offset = 0;
212 }
213
214 buf->sglen = buf->npages;
215 buf->sglist = sglist;
216
217 return 0;
218}
219
220
221
222
223
224
225
226static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
227{
228 struct scatterlist *sglist;
229 unsigned int offset = buf->offset;
230 unsigned long pfn = buf->paddr >> PAGE_SHIFT;
231 unsigned int i;
232
233 sglist = vmalloc(buf->npages * sizeof(*sglist));
234 if (sglist == NULL)
235 return -ENOMEM;
236
237 sg_init_table(sglist, buf->npages);
238
239 for (i = 0; i < buf->npages; ++i, ++pfn) {
240 sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
241 offset);
242
243
244
245 sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
246 offset = 0;
247 }
248
249 buf->sglen = buf->npages;
250 buf->sglist = sglist;
251
252 return 0;
253}
254
255
256
257
258
259
260
/*
 * isp_video_buffer_cleanup - Release all resources attached to a buffer.
 * @buf: the buffer
 *
 * Teardown mirrors prepare in reverse order: driver-specific cleanup first,
 * then the DMA mapping, then the scatter list, and finally the pinned user
 * pages. Safe to call on a partially prepared buffer (sglist/pages may be
 * NULL). Leaves the buffer ready to be prepared again.
 */
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	enum dma_data_direction direction;
	unsigned int i;

	/* Give the driver a chance to undo its own buffer_prepare work. */
	if (buf->queue->ops->buffer_cleanup)
		buf->queue->ops->buffer_cleanup(buf);

	/* PFNMAP buffers were never dma_map_sg()'d — their DMA addresses were
	 * filled in manually — so only unmap page-backed buffers.
	 */
	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
			     direction);
	}

	/* vfree(NULL) is a no-op, so this is safe for unprepared buffers. */
	vfree(buf->sglist);
	buf->sglist = NULL;
	buf->sglen = 0;

	/* USERPTR (non-PFNMAP) buffers: unlock the VMAs and drop the page
	 * references taken by get_user_pages().
	 */
	if (buf->pages != NULL) {
		isp_video_buffer_lock_vma(buf, 0);

		for (i = 0; i < buf->npages; ++i)
			page_cache_release(buf->pages[i]);

		vfree(buf->pages);
		buf->pages = NULL;
	}

	buf->npages = 0;
	buf->skip_cache = false;
}
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
318{
319 unsigned long data;
320 unsigned int first;
321 unsigned int last;
322 int ret;
323
324 data = buf->vbuf.m.userptr;
325 first = (data & PAGE_MASK) >> PAGE_SHIFT;
326 last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
327
328 buf->offset = data & ~PAGE_MASK;
329 buf->npages = last - first + 1;
330 buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
331 if (buf->pages == NULL)
332 return -ENOMEM;
333
334 down_read(¤t->mm->mmap_sem);
335 ret = get_user_pages(current, current->mm, data & PAGE_MASK,
336 buf->npages,
337 buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
338 buf->pages, NULL);
339 up_read(¤t->mm->mmap_sem);
340
341 if (ret != buf->npages) {
342 buf->npages = ret < 0 ? 0 : ret;
343 isp_video_buffer_cleanup(buf);
344 return -EFAULT;
345 }
346
347 ret = isp_video_buffer_lock_vma(buf, 1);
348 if (ret < 0)
349 isp_video_buffer_cleanup(buf);
350
351 return ret;
352}
353
354
355
356
357
358
359
360
361
362static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
363{
364 struct vm_area_struct *vma;
365 unsigned long prev_pfn;
366 unsigned long this_pfn;
367 unsigned long start;
368 unsigned long end;
369 dma_addr_t pa = 0;
370 int ret = -EFAULT;
371
372 start = buf->vbuf.m.userptr;
373 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
374
375 buf->offset = start & ~PAGE_MASK;
376 buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
377 buf->pages = NULL;
378
379 down_read(¤t->mm->mmap_sem);
380 vma = find_vma(current->mm, start);
381 if (vma == NULL || vma->vm_end < end)
382 goto done;
383
384 for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
385 ret = follow_pfn(vma, start, &this_pfn);
386 if (ret)
387 goto done;
388
389 if (prev_pfn == 0)
390 pa = this_pfn << PAGE_SHIFT;
391 else if (this_pfn != prev_pfn + 1) {
392 ret = -EFAULT;
393 goto done;
394 }
395
396 prev_pfn = this_pfn;
397 }
398
399 buf->paddr = pa + buf->offset;
400 ret = 0;
401
402done:
403 up_read(¤t->mm->mmap_sem);
404 return ret;
405}
406
407
408
409
410
411
412
413
414
415
416
417
418
419static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
420{
421 struct vm_area_struct *vma;
422 pgprot_t uninitialized_var(vm_page_prot);
423 unsigned long start;
424 unsigned long end;
425 int ret = -EFAULT;
426
427 start = buf->vbuf.m.userptr;
428 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
429
430 down_read(¤t->mm->mmap_sem);
431
432 do {
433 vma = find_vma(current->mm, start);
434 if (vma == NULL)
435 goto done;
436
437 if (start == buf->vbuf.m.userptr) {
438 buf->vm_flags = vma->vm_flags;
439 vm_page_prot = vma->vm_page_prot;
440 }
441
442 if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
443 goto done;
444
445 if (vm_page_prot != vma->vm_page_prot)
446 goto done;
447
448 start = vma->vm_end + 1;
449 } while (vma->vm_end < end);
450
451
452
453
454 if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
455 vm_page_prot == pgprot_writecombine(vm_page_prot))
456 buf->skip_cache = true;
457
458 ret = 0;
459
460done:
461 up_read(¤t->mm->mmap_sem);
462 return ret;
463}
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
/*
 * isp_video_buffer_prepare - Make a buffer ready for DMA.
 * @buf: the buffer
 *
 * Builds the scatter list for the buffer's memory type (MMAP kernel memory,
 * page-backed USERPTR, or physically contiguous PFNMAP USERPTR), maps it for
 * DMA where applicable, and runs the driver's buffer_prepare hook. Any
 * failure after the memory-type dispatch tears the buffer down again via
 * isp_video_buffer_cleanup().
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	enum dma_data_direction direction;
	int ret;

	switch (buf->vbuf.memory) {
	case V4L2_MEMORY_MMAP:
		ret = isp_video_buffer_sglist_kernel(buf);
		break;

	case V4L2_MEMORY_USERPTR:
		/* Inspect the VMAs first to decide between the PFNMAP and
		 * page-backed paths. The prepare_* helpers clean up after
		 * themselves on failure, hence the early returns.
		 */
		ret = isp_video_buffer_prepare_vm_flags(buf);
		if (ret < 0)
			return ret;

		if (buf->vm_flags & VM_PFNMAP) {
			ret = isp_video_buffer_prepare_pfnmap(buf);
			if (ret < 0)
				return ret;

			ret = isp_video_buffer_sglist_pfnmap(buf);
		} else {
			ret = isp_video_buffer_prepare_user(buf);
			if (ret < 0)
				return ret;

			ret = isp_video_buffer_sglist_user(buf);
		}
		break;

	default:
		return -EINVAL;
	}

	if (ret < 0)
		goto done;

	/* PFNMAP buffers had their DMA addresses filled in directly and must
	 * not go through dma_map_sg().
	 */
	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
				 direction);
		if (ret != buf->sglen) {
			ret = -EFAULT;
			goto done;
		}
	}

	if (buf->queue->ops->buffer_prepare)
		ret = buf->queue->ops->buffer_prepare(buf);

done:
	if (ret < 0) {
		isp_video_buffer_cleanup(buf);
		return ret;
	}

	return ret;
}
539
540
541
542
543
544
/*
 * isp_video_buffer_query - Fill a v4l2_buffer with the buffer's current state.
 * @buf: the buffer
 * @vbuf: the v4l2_buffer to fill for userspace
 *
 * Copies the cached v4l2_buffer and ORs in the flags matching the buffer's
 * state. The switch deliberately falls through: an ERROR buffer is also DONE,
 * and DONE/QUEUED/ACTIVE buffers all report QUEUED, matching V4L2 semantics.
 */
static void isp_video_buffer_query(struct isp_video_buffer *buf,
				   struct v4l2_buffer *vbuf)
{
	memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));

	if (buf->vma_use_count)
		vbuf->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (buf->state) {
	case ISP_BUF_STATE_ERROR:
		vbuf->flags |= V4L2_BUF_FLAG_ERROR;
		/* fallthrough */
	case ISP_BUF_STATE_DONE:
		vbuf->flags |= V4L2_BUF_FLAG_DONE;
		/* fallthrough */
	case ISP_BUF_STATE_QUEUED:
	case ISP_BUF_STATE_ACTIVE:
		vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case ISP_BUF_STATE_IDLE:
	default:
		break;
	}
}
567
568
569
570
571
572
573
574
575
576
577static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
578{
579 if (nonblocking) {
580 return (buf->state != ISP_BUF_STATE_QUEUED &&
581 buf->state != ISP_BUF_STATE_ACTIVE)
582 ? 0 : -EAGAIN;
583 }
584
585 return wait_event_interruptible(buf->wait,
586 buf->state != ISP_BUF_STATE_QUEUED &&
587 buf->state != ISP_BUF_STATE_ACTIVE);
588}
589
590
591
592
593
594
595
596
597
598
599
600
601
602static int isp_video_queue_free(struct isp_video_queue *queue)
603{
604 unsigned int i;
605
606 if (queue->streaming)
607 return -EBUSY;
608
609 for (i = 0; i < queue->count; ++i) {
610 if (queue->buffers[i]->vma_use_count != 0)
611 return -EBUSY;
612 }
613
614 for (i = 0; i < queue->count; ++i) {
615 struct isp_video_buffer *buf = queue->buffers[i];
616
617 isp_video_buffer_cleanup(buf);
618
619 vfree(buf->vaddr);
620 buf->vaddr = NULL;
621
622 kfree(buf);
623 queue->buffers[i] = NULL;
624 }
625
626 INIT_LIST_HEAD(&queue->queue);
627 queue->count = 0;
628 return 0;
629}
630
631
632
633
634
635
/*
 * isp_video_queue_alloc - Allocate video buffers for the queue.
 * @queue: the queue
 * @nbuffers: requested number of buffers
 * @size: size of each buffer in bytes
 * @memory: V4L2 memory type (MMAP buffers get kernel backing storage)
 *
 * Frees any existing buffers first. Allocation stops at the first failure;
 * partial success is reported by returning the number of buffers actually
 * allocated (which callers pass back to userspace per V4L2 REQBUFS
 * semantics).
 *
 * Returns the number of buffers allocated (nbuffers on full success), 0 if
 * nbuffers is 0, or a negative error code.
 */
static int isp_video_queue_alloc(struct isp_video_queue *queue,
				 unsigned int nbuffers,
				 unsigned int size, enum v4l2_memory memory)
{
	struct isp_video_buffer *buf;
	unsigned int i;
	void *mem;
	int ret;

	/* Start from a clean slate; fails with -EBUSY if buffers are in use. */
	ret = isp_video_queue_free(queue);
	if (ret < 0)
		return ret;

	if (nbuffers == 0)
		return 0;

	for (i = 0; i < nbuffers; ++i) {
		/* bufsize covers the driver's derived buffer structure. */
		buf = kzalloc(queue->bufsize, GFP_KERNEL);
		if (buf == NULL)
			break;

		if (memory == V4L2_MEMORY_MMAP) {
			/* DMA-capable (32-bit), user-mappable backing memory;
			 * the mmap offset identifies the buffer to userspace.
			 */
			mem = vmalloc_32_user(PAGE_ALIGN(size));
			if (mem == NULL) {
				kfree(buf);
				break;
			}

			buf->vbuf.m.offset = i * PAGE_ALIGN(size);
			buf->vaddr = mem;
		}

		buf->vbuf.index = i;
		buf->vbuf.length = size;
		buf->vbuf.type = queue->type;
		buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
		buf->vbuf.field = V4L2_FIELD_NONE;
		buf->vbuf.memory = memory;

		buf->queue = queue;
		init_waitqueue_head(&buf->wait);

		queue->buffers[i] = buf;
	}

	if (i == 0)
		return -ENOMEM;

	queue->count = i;
	return nbuffers;
}
693
694
695
696
697
698
699
700
701
702
703
704
/*
 * omap3isp_video_queue_cleanup - Free all queue resources.
 * @queue: the queue
 *
 * Thin public wrapper around isp_video_queue_free(). Returns 0 on success or
 * -EBUSY if the queue is streaming or buffers are still mapped.
 */
int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
{
	return isp_video_queue_free(queue);
}
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731int omap3isp_video_queue_init(struct isp_video_queue *queue,
732 enum v4l2_buf_type type,
733 const struct isp_video_queue_operations *ops,
734 struct device *dev, unsigned int bufsize)
735{
736 INIT_LIST_HEAD(&queue->queue);
737 mutex_init(&queue->lock);
738 spin_lock_init(&queue->irqlock);
739
740 queue->type = type;
741 queue->ops = ops;
742 queue->dev = dev;
743 queue->bufsize = bufsize;
744
745 return 0;
746}
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
/*
 * omap3isp_video_queue_reqbufs - VIDIOC_REQBUFS handler.
 * @queue: the queue
 * @rb: the request from userspace; rb->count is updated with the number of
 *      buffers actually allocated
 *
 * Asks the driver (queue_prepare) to clamp the buffer count and provide the
 * buffer size, caps the count at ISP_VIDEO_MAX_BUFFERS, then (re)allocates
 * the buffers under the queue lock. Per V4L2, a partial allocation is a
 * success with a reduced count.
 *
 * Returns 0 on success or a negative error code.
 */
int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
				 struct v4l2_requestbuffers *rb)
{
	unsigned int nbuffers = rb->count;
	unsigned int size;
	int ret;

	if (rb->type != queue->type)
		return -EINVAL;

	/* The driver decides the final count and the per-buffer size. */
	queue->ops->queue_prepare(queue, &nbuffers, &size);
	if (size == 0)
		return -EINVAL;

	nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);

	mutex_lock(&queue->lock);

	ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
	if (ret < 0)
		goto done;

	/* ret is the number of buffers actually allocated. */
	rb->count = ret;
	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
802
803
804
805
806
807
808
809
810
811int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
812 struct v4l2_buffer *vbuf)
813{
814 struct isp_video_buffer *buf;
815 int ret = 0;
816
817 if (vbuf->type != queue->type)
818 return -EINVAL;
819
820 mutex_lock(&queue->lock);
821
822 if (vbuf->index >= queue->count) {
823 ret = -EINVAL;
824 goto done;
825 }
826
827 buf = queue->buffers[vbuf->index];
828 isp_video_buffer_query(buf, vbuf);
829
830done:
831 mutex_unlock(&queue->lock);
832 return ret;
833}
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
849 struct v4l2_buffer *vbuf)
850{
851 struct isp_video_buffer *buf;
852 unsigned long flags;
853 int ret = -EINVAL;
854
855 if (vbuf->type != queue->type)
856 goto done;
857
858 mutex_lock(&queue->lock);
859
860 if (vbuf->index >= queue->count)
861 goto done;
862
863 buf = queue->buffers[vbuf->index];
864
865 if (vbuf->memory != buf->vbuf.memory)
866 goto done;
867
868 if (buf->state != ISP_BUF_STATE_IDLE)
869 goto done;
870
871 if (vbuf->memory == V4L2_MEMORY_USERPTR &&
872 vbuf->length < buf->vbuf.length)
873 goto done;
874
875 if (vbuf->memory == V4L2_MEMORY_USERPTR &&
876 vbuf->m.userptr != buf->vbuf.m.userptr) {
877 isp_video_buffer_cleanup(buf);
878 buf->vbuf.m.userptr = vbuf->m.userptr;
879 buf->prepared = 0;
880 }
881
882 if (!buf->prepared) {
883 ret = isp_video_buffer_prepare(buf);
884 if (ret < 0)
885 goto done;
886 buf->prepared = 1;
887 }
888
889 isp_video_buffer_cache_sync(buf);
890
891 buf->state = ISP_BUF_STATE_QUEUED;
892 list_add_tail(&buf->stream, &queue->queue);
893
894 if (queue->streaming) {
895 spin_lock_irqsave(&queue->irqlock, flags);
896 queue->ops->buffer_queue(buf);
897 spin_unlock_irqrestore(&queue->irqlock, flags);
898 }
899
900 ret = 0;
901
902done:
903 mutex_unlock(&queue->lock);
904 return ret;
905}
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
/*
 * omap3isp_video_queue_dqbuf - VIDIOC_DQBUF handler.
 * @queue: the queue
 * @vbuf: the v4l2_buffer to fill for userspace
 * @nonblocking: when non-zero, fail with -EAGAIN instead of waiting
 *
 * Waits for the oldest queued buffer to complete, removes it from the stream
 * list, reports its state to userspace and returns it to the IDLE state.
 *
 * Returns 0 on success, -EINVAL on bad type or empty queue, -EAGAIN or
 * -ERESTARTSYS from the wait.
 */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
			       struct v4l2_buffer *vbuf, int nonblocking)
{
	struct isp_video_buffer *buf;
	int ret;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (list_empty(&queue->queue)) {
		ret = -EINVAL;
		goto done;
	}

	/* Buffers complete in order: only the head can be dequeued. */
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
	ret = isp_video_buffer_wait(buf, nonblocking);
	if (ret < 0)
		goto done;

	list_del(&buf->stream);

	isp_video_buffer_query(buf, vbuf);
	buf->state = ISP_BUF_STATE_IDLE;
	/* Query reported QUEUED from the pre-dequeue state; clear it. */
	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
952
953
954
955
956
957
958
959
960
961
962int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
963{
964 struct isp_video_buffer *buf;
965 unsigned long flags;
966
967 mutex_lock(&queue->lock);
968
969 if (queue->streaming)
970 goto done;
971
972 queue->streaming = 1;
973
974 spin_lock_irqsave(&queue->irqlock, flags);
975 list_for_each_entry(buf, &queue->queue, stream)
976 queue->ops->buffer_queue(buf);
977 spin_unlock_irqrestore(&queue->irqlock, flags);
978
979done:
980 mutex_unlock(&queue->lock);
981 return 0;
982}
983
984
985
986
987
988
989
990
991
992
993
/*
 * omap3isp_video_queue_streamoff - VIDIOC_STREAMOFF handler.
 * @queue: the queue
 *
 * Stops streaming, wakes any waiter sleeping on an ACTIVE buffer, resets all
 * buffers to IDLE and empties the stream list. The IRQ lock protects the
 * buffer states against the interrupt handler. A no-op if not streaming.
 */
void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	queue->streaming = 0;

	spin_lock_irqsave(&queue->irqlock, flags);
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		/* Unblock dqbuf waiters stuck on a buffer that will never
		 * complete now that streaming stopped.
		 */
		if (buf->state == ISP_BUF_STATE_ACTIVE)
			wake_up(&buf->wait);

		buf->state = ISP_BUF_STATE_IDLE;
	}
	spin_unlock_irqrestore(&queue->irqlock, flags);

	INIT_LIST_HEAD(&queue->queue);

done:
	mutex_unlock(&queue->lock);
}
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
1036{
1037 struct isp_video_buffer *buf;
1038 unsigned int i;
1039
1040 mutex_lock(&queue->lock);
1041
1042 if (!queue->streaming)
1043 goto done;
1044
1045 for (i = 0; i < queue->count; ++i) {
1046 buf = queue->buffers[i];
1047
1048 if (buf->state == ISP_BUF_STATE_DONE)
1049 buf->state = ISP_BUF_STATE_ERROR;
1050 }
1051
1052done:
1053 mutex_unlock(&queue->lock);
1054}
1055
/* VMA open handler: count one more userspace mapping of the buffer. The
 * count keeps isp_video_queue_free() from releasing mapped buffers.
 */
static void isp_video_queue_vm_open(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count++;
}
1062
/* VMA close handler: drop one userspace mapping of the buffer. */
static void isp_video_queue_vm_close(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count--;
}
1069
/* VMA callbacks for buffer mappings; they maintain vma_use_count. */
static const struct vm_operations_struct isp_video_queue_vm_ops = {
	.open = isp_video_queue_vm_open,
	.close = isp_video_queue_vm_close,
};
1074
1075
1076
1077
1078
1079
1080
1081
1082
/*
 * omap3isp_video_queue_mmap - mmap file operation for MMAP buffers.
 * @queue: the queue
 * @vma: the VMA passed by the mmap syscall
 *
 * Looks up the buffer whose mmap offset matches vma->vm_pgoff, validates the
 * request (MMAP memory type, exact page-aligned size) and remaps the
 * buffer's vmalloc backing into userspace. Installs the VM ops that track
 * the mapping count.
 *
 * Returns 0 on success or a negative error code.
 */
int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
			      struct vm_area_struct *vma)
{
	struct isp_video_buffer *uninitialized_var(buf);
	unsigned long size;
	unsigned int i;
	int ret = 0;

	mutex_lock(&queue->lock);

	/* The pgoff encodes which buffer is being mapped (set at alloc). */
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];
		if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}

	if (i == queue->count) {
		ret = -EINVAL;
		goto done;
	}

	size = vma->vm_end - vma->vm_start;

	/* Only whole-buffer mappings of MMAP buffers are supported. */
	if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
	    size != PAGE_ALIGN(buf->vbuf.length)) {
		ret = -EINVAL;
		goto done;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret < 0)
		goto done;

	vma->vm_ops = &isp_video_queue_vm_ops;
	vma->vm_private_data = buf;
	/* Count the initial mapping; forks/unmaps go through vm_ops. */
	isp_video_queue_vm_open(vma);

done:
	mutex_unlock(&queue->lock);
	return ret;
}
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
/*
 * omap3isp_video_queue_poll - poll file operation.
 * @queue: the queue
 * @file: file being polled
 * @wait: poll table
 *
 * Polls on the first buffer in the stream list — the next one to complete,
 * since buffers finish in order. Reports POLLERR when nothing is queued,
 * POLLIN/POLLRDNORM (capture) or POLLOUT/POLLWRNORM (output) once the head
 * buffer is DONE or in ERROR.
 *
 * Returns the poll event mask.
 */
unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
				       struct file *file, poll_table *wait)
{
	struct isp_video_buffer *buf;
	unsigned int mask = 0;

	mutex_lock(&queue->lock);
	if (list_empty(&queue->queue)) {
		mask |= POLLERR;
		goto done;
	}
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);

	poll_wait(file, &buf->wait, wait);
	if (buf->state == ISP_BUF_STATE_DONE ||
	    buf->state == ISP_BUF_STATE_ERROR) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			mask |= POLLIN | POLLRDNORM;
		else
			mask |= POLLOUT | POLLWRNORM;
	}

done:
	mutex_unlock(&queue->lock);
	return mask;
}
1160