/*
 * Brontes frame grabber driver
 *
 * Author: Daniel Drake <ddrake@brontes3d.com>
 *
 * Released under the terms of the GNU General Public License.
 */
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/sched.h>

static unsigned int b3dfg_nbuf = 2;

module_param_named(buffer_count, b3dfg_nbuf, uint, 0444);

MODULE_PARM_DESC(buffer_count, "Number of buffers (min 2, default 2)");

MODULE_AUTHOR("Daniel Drake <ddrake@brontes3d.com>");
MODULE_DESCRIPTION("Brontes frame grabber driver");
MODULE_LICENSE("GPL");

#define DRIVER_NAME "b3dfg"
#define B3DFG_MAX_DEVS 4
#define B3DFG_FRAMES_PER_BUFFER 3

#define B3DFG_BAR_REGS 0
#define B3DFG_REGS_LENGTH 0x10000

#define B3DFG_IOC_MAGIC 0xb3
#define B3DFG_IOCGFRMSZ _IOR(B3DFG_IOC_MAGIC, 1, int)
#define B3DFG_IOCTNUMBUFS _IO(B3DFG_IOC_MAGIC, 2)
#define B3DFG_IOCTTRANS _IO(B3DFG_IOC_MAGIC, 3)
#define B3DFG_IOCTQUEUEBUF _IO(B3DFG_IOC_MAGIC, 4)
#define B3DFG_IOCTPOLLBUF _IOWR(B3DFG_IOC_MAGIC, 5, struct b3dfg_poll)
#define B3DFG_IOCTWAITBUF _IOWR(B3DFG_IOC_MAGIC, 6, struct b3dfg_wait)
#define B3DFG_IOCGWANDSTAT _IOR(B3DFG_IOC_MAGIC, 7, int)

enum {
	/* number of 4 kB pages per frame */
	B3D_REG_FRM_SIZE = 0x0,

	/*
	 * Interrupt enable register: the driver writes 0x3 here to enable
	 * DMA and cable status interrupts, and 0 to disable them.
	 */
	B3D_REG_HW_CTRL = 0x4,

	/*
	 * bit 1:0  - 1-based index of the next pending frame transfer
	 *            (0 = none)
	 * bit 2    - the previous DMA transfer has completed
	 * bit 3    - the wand cable status has changed
	 * bit 15:8 - number of triplets dropped since the last read
	 */
	B3D_REG_DMA_STS = 0x8,

	/* bit 0: wand present (1) or disconnected (0) */
	B3D_REG_WAND_STS = 0xc,

	/* bus address for the next DMA transfer */
	B3D_REG_EC220_DMA_ADDR = 0x8000,

	/* transfer size in 32-bit words */
	B3D_REG_EC220_TRF_SIZE = 0x8004,

	/*
	 * DMA engine control/status: writing 0xf starts a transfer,
	 * writing 0xb acknowledges an interrupt, and bit 0 reads back
	 * as an error indicator.
	 */
	B3D_REG_EC220_DMA_STS = 0x8008,
};

/*
 * Buffer lifecycle: POLLED (owned by userspace) -> PENDING (queued for
 * DMA) -> POPULATED (holds a complete triplet) -> POLLED again once the
 * triplet has been collected via the poll/wait ioctls.
 */
enum b3dfg_buffer_state {
	B3DFG_BUFFER_POLLED = 0,
	B3DFG_BUFFER_PENDING,
	B3DFG_BUFFER_POPULATED,
};

/* a buffer holds one frame triplet */
struct b3dfg_buffer {
	unsigned char *frame[B3DFG_FRAMES_PER_BUFFER];
	struct list_head list;
	u8 state;
};

struct b3dfg_dev {
	/* no protection needed: all finalized at initialization time */
	struct pci_dev *pdev;
	struct cdev chardev;
	struct device *dev;
	void __iomem *regs;
	unsigned int frame_size;

	/*
	 * Protects the buffer state machine, including buffer_queue,
	 * triplet_ready, cur_dma_frame_idx and cur_dma_frame_addr.
	 */
	spinlock_t buffer_lock;
	struct b3dfg_buffer *buffers;
	struct list_head buffer_queue;

	/* index of the frame currently being DMAed, or -1 if none */
	int cur_dma_frame_idx;

	/* bus address of the frame currently being DMAed */
	dma_addr_t cur_dma_frame_addr;

	/*
	 * Protects cstate_tstamp, the timestamp of the most recent cable
	 * state change. Nests inside buffer_lock.
	 */
	spinlock_t cstate_lock;
	unsigned long cstate_tstamp;

	/*
	 * Protects triplets_dropped, the number of triplets the hardware
	 * has discarded since the count was last handed to userspace.
	 * Nests inside buffer_lock.
	 */
	spinlock_t triplets_dropped_lock;
	unsigned int triplets_dropped;

	wait_queue_head_t buffer_waitqueue;

	unsigned int transmission_enabled:1;
	unsigned int triplet_ready:1;
};

static u8 b3dfg_devices[B3DFG_MAX_DEVS];

static struct class *b3dfg_class;
static dev_t b3dfg_devt;

static const struct pci_device_id b3dfg_ids[] __devinitconst = {
	{ PCI_DEVICE(0x0b3d, 0x0001) },
	{ },
};

MODULE_DEVICE_TABLE(pci, b3dfg_ids);

/* payloads for the poll/wait buffer ioctls, shared with userspace */

struct b3dfg_poll {
	int buffer_idx;
	unsigned int triplets_dropped;
};

struct b3dfg_wait {
	int buffer_idx;
	unsigned int timeout;
	unsigned int triplets_dropped;
};
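
/*
 * Illustrative userspace flow for this interface (not part of the driver;
 * the /dev/b3dfg0 node name and the mirrored ioctl/struct definitions are
 * assumptions). A minimal capture sequence, error handling omitted:
 *
 *	int fd = open("/dev/b3dfg0", O_RDONLY);
 *	int frame_size;
 *	struct b3dfg_wait w = { .buffer_idx = 0, .timeout = 1000 };
 *
 *	ioctl(fd, B3DFG_IOCGFRMSZ, &frame_size);
 *	ioctl(fd, B3DFG_IOCTQUEUEBUF, 0);	queue buffer 0 for DMA
 *	ioctl(fd, B3DFG_IOCTTRANS, 1);		enable transmission
 *	ioctl(fd, B3DFG_IOCTWAITBUF, &w);	sleep up to 1000 ms
 *
 * On success, buffer 0 holds a complete triplet and w.triplets_dropped
 * reports how many triplets the hardware discarded in the meantime.
 */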

/* register I/O helpers */

static u32 b3dfg_read32(struct b3dfg_dev *fgdev, u16 reg)
{
	return ioread32(fgdev->regs + reg);
}

static void b3dfg_write32(struct b3dfg_dev *fgdev, u16 reg, u32 value)
{
	iowrite32(value, fgdev->regs + reg);
}

/*
 * Map one frame of a buffer for DMA and program the hardware to transfer
 * it. Caller must hold buffer_lock.
 */
static int setup_frame_transfer(struct b3dfg_dev *fgdev,
				struct b3dfg_buffer *buf, int frame)
{
	unsigned char *frm_addr;
	dma_addr_t frm_addr_dma;
	unsigned int frm_size = fgdev->frame_size;

	frm_addr = buf->frame[frame];
	frm_addr_dma = pci_map_single(fgdev->pdev, frm_addr,
				      frm_size, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(fgdev->pdev, frm_addr_dma))
		return -ENOMEM;

	fgdev->cur_dma_frame_addr = frm_addr_dma;
	fgdev->cur_dma_frame_idx = frame;

	/* iowrite32() already converts to little endian; no cpu_to_le32() */
	b3dfg_write32(fgdev, B3D_REG_EC220_DMA_ADDR, frm_addr_dma);
	b3dfg_write32(fgdev, B3D_REG_EC220_TRF_SIZE, frm_size >> 2);

	/* kick off the transfer; this also acknowledges any pending interrupt */
	b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0xf);

	return 0;
}

/* Return all buffers to the POLLED state. Caller must hold buffer_lock. */
static void dequeue_all_buffers(struct b3dfg_dev *fgdev)
{
	int i;
	for (i = 0; i < b3dfg_nbuf; i++) {
		struct b3dfg_buffer *buf = &fgdev->buffers[i];
		buf->state = B3DFG_BUFFER_POLLED;
		list_del_init(&buf->list);
	}
}

/* queue a buffer to receive the next triplet */
static int queue_buffer(struct b3dfg_dev *fgdev, int bufidx)
{
	struct device *dev = &fgdev->pdev->dev;
	struct b3dfg_buffer *buf;
	unsigned long flags;
	int r = 0;

	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	if (bufidx < 0 || bufidx >= b3dfg_nbuf) {
		dev_dbg(dev, "invalid buffer index %d\n", bufidx);
		r = -ENOENT;
		goto out;
	}
	buf = &fgdev->buffers[bufidx];

	if (unlikely(buf->state == B3DFG_BUFFER_PENDING)) {
		dev_dbg(dev, "buffer %d is already queued\n", bufidx);
		r = -EINVAL;
		goto out;
	}

	buf->state = B3DFG_BUFFER_PENDING;
	list_add_tail(&buf->list, &fgdev->buffer_queue);

	if (fgdev->transmission_enabled && fgdev->triplet_ready) {
		dev_dbg(dev, "triplet is ready, pushing immediately\n");
		fgdev->triplet_ready = 0;
		r = setup_frame_transfer(fgdev, buf, 0);
		if (r)
			dev_err(dev, "unable to map DMA buffer\n");
	}

out:
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
	return r;
}

/*
 * Non-blocking check of a buffer: returns 1 and collects the dropped
 * triplet count if the buffer holds a complete triplet, 0 otherwise.
 */
static int poll_buffer(struct b3dfg_dev *fgdev, void __user *arg)
{
	struct device *dev = &fgdev->pdev->dev;
	struct b3dfg_poll p;
	struct b3dfg_buffer *buf;
	unsigned long flags;
	int r = 1;
	int arg_out = 0;

	if (copy_from_user(&p, arg, sizeof(p)))
		return -EFAULT;

	if (unlikely(!fgdev->transmission_enabled)) {
		dev_dbg(dev, "cannot poll, transmission disabled\n");
		return -EINVAL;
	}

	if (p.buffer_idx < 0 || p.buffer_idx >= b3dfg_nbuf)
		return -ENOENT;

	buf = &fgdev->buffers[p.buffer_idx];

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	if (likely(buf->state == B3DFG_BUFFER_POPULATED)) {
		arg_out = 1;
		buf->state = B3DFG_BUFFER_POLLED;

		/* hand the dropped-triplet count to userspace and reset it */
		spin_lock(&fgdev->triplets_dropped_lock);
		p.triplets_dropped = fgdev->triplets_dropped;
		fgdev->triplets_dropped = 0;
		spin_unlock(&fgdev->triplets_dropped_lock);
	} else {
		r = 0;
	}

	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	if (arg_out && copy_to_user(arg, &p, sizeof(p)))
		r = -EFAULT;

	return r;
}

/* return the timestamp of the most recent cable state change */
static unsigned long get_cstate_change(struct b3dfg_dev *fgdev)
{
	unsigned long flags, when;

	spin_lock_irqsave(&fgdev->cstate_lock, flags);
	when = fgdev->cstate_tstamp;
	spin_unlock_irqrestore(&fgdev->cstate_lock, flags);
	return when;
}

/*
 * Wake condition for wait_buffer(): the buffer was populated, transmission
 * was disabled, or the cable state changed while we slept.
 */
static int is_event_ready(struct b3dfg_dev *fgdev, struct b3dfg_buffer *buf,
			  unsigned long when)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	spin_lock(&fgdev->cstate_lock);
	result = (!fgdev->transmission_enabled ||
		  buf->state == B3DFG_BUFFER_POPULATED ||
		  when != fgdev->cstate_tstamp);
	spin_unlock(&fgdev->cstate_lock);
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	return result;
}

/* sleep until a buffer is populated, with an optional timeout in ms */
static int wait_buffer(struct b3dfg_dev *fgdev, void __user *arg)
{
	struct device *dev = &fgdev->pdev->dev;
	struct b3dfg_wait w;
	struct b3dfg_buffer *buf;
	unsigned long flags, when;
	int r;

	if (copy_from_user(&w, arg, sizeof(w)))
		return -EFAULT;

	if (!fgdev->transmission_enabled) {
		dev_dbg(dev, "cannot wait, transmission disabled\n");
		return -EINVAL;
	}

	if (w.buffer_idx < 0 || w.buffer_idx >= b3dfg_nbuf)
		return -ENOENT;

	buf = &fgdev->buffers[w.buffer_idx];

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	if (buf->state == B3DFG_BUFFER_POPULATED) {
		r = w.timeout;
		goto out_triplets_dropped;
	}

	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	when = get_cstate_change(fgdev);
	if (w.timeout > 0) {
		r = wait_event_interruptible_timeout(fgdev->buffer_waitqueue,
			is_event_ready(fgdev, buf, when),
			msecs_to_jiffies(w.timeout));

		if (unlikely(r < 0))
			goto out;

		/* report the remaining time back to userspace */
		w.timeout = jiffies_to_msecs(r);
	} else {
		r = wait_event_interruptible(fgdev->buffer_waitqueue,
			is_event_ready(fgdev, buf, when));

		if (unlikely(r)) {
			r = -ERESTARTSYS;
			goto out;
		}
	}

	/* a cable event or disabled transmission invalidates the wait */
	if (!fgdev->transmission_enabled || when != get_cstate_change(fgdev)) {
		r = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	if (buf->state != B3DFG_BUFFER_POPULATED) {
		r = -ETIMEDOUT;
		goto out_unlock;
	}

	buf->state = B3DFG_BUFFER_POLLED;

out_triplets_dropped:
	/* hand the dropped-triplet count to userspace and reset it */
	spin_lock(&fgdev->triplets_dropped_lock);
	w.triplets_dropped = fgdev->triplets_dropped;
	fgdev->triplets_dropped = 0;
	spin_unlock(&fgdev->triplets_dropped_lock);

out_unlock:
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
	if (copy_to_user(arg, &w, sizeof(w)))
		r = -EFAULT;
out:
	return r;
}

/* mmap page fault handler: map the faulting offset to its frame page */
static int b3dfg_vma_fault(struct vm_area_struct *vma,
	struct vm_fault *vmf)
{
	struct b3dfg_dev *fgdev = vma->vm_file->private_data;
	unsigned long off = vmf->pgoff << PAGE_SHIFT;
	unsigned int frame_size = fgdev->frame_size;
	unsigned int buf_size = frame_size * B3DFG_FRAMES_PER_BUFFER;
	unsigned char *addr;

	/* determine which buffer the offset lies within... */
	unsigned int buf_idx = off / buf_size;
	/* ...and the offset into that buffer */
	unsigned int buf_off = off % buf_size;

	/* determine which frame within the buffer... */
	unsigned int frm_idx = buf_off / frame_size;
	/* ...and the offset into that frame */
	unsigned int frm_off = buf_off % frame_size;

	if (unlikely(buf_idx >= b3dfg_nbuf))
		return VM_FAULT_SIGBUS;

	addr = fgdev->buffers[buf_idx].frame[frm_idx] + frm_off;
	vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
		      virt_to_phys(addr) >> PAGE_SHIFT);

	return VM_FAULT_NOPAGE;
}

static struct vm_operations_struct b3dfg_vm_ops = {
	.fault = b3dfg_vma_fault,
};

static int get_wand_status(struct b3dfg_dev *fgdev, int __user *arg)
{
	u32 wndstat = b3dfg_read32(fgdev, B3D_REG_WAND_STS);
	dev_dbg(&fgdev->pdev->dev, "wand status %x\n", wndstat);
	return __put_user(wndstat & 0x1, arg);
}

static int enable_transmission(struct b3dfg_dev *fgdev)
{
	u16 command;
	unsigned long flags;
	struct device *dev = &fgdev->pdev->dev;

	dev_dbg(dev, "enable transmission\n");

	/* the wand must be connected before transmission can start */
	if (!b3dfg_read32(fgdev, B3D_REG_WAND_STS)) {
		dev_dbg(dev, "cannot start transmission without wand\n");
		return -EINVAL;
	}

	/*
	 * Check that we are still bus master before each transmission
	 * start; the device cannot DMA without it.
	 */
	pci_read_config_word(fgdev->pdev, PCI_COMMAND, &command);
	if (!(command & PCI_COMMAND_MASTER)) {
		dev_err(dev, "not a bus master, force-enabling\n");
		pci_write_config_word(fgdev->pdev, PCI_COMMAND,
			command | PCI_COMMAND_MASTER);
	}

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	/* handle racing enable_transmission() calls */
	if (fgdev->transmission_enabled) {
		spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
		goto out;
	}

	spin_lock(&fgdev->triplets_dropped_lock);
	fgdev->triplets_dropped = 0;
	spin_unlock(&fgdev->triplets_dropped_lock);

	fgdev->triplet_ready = 0;
	fgdev->cur_dma_frame_idx = -1;
	fgdev->transmission_enabled = 1;

	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	/* enable DMA and cable status interrupts */
	b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0x03);

out:
	return 0;
}

static void disable_transmission(struct b3dfg_dev *fgdev)
{
	struct device *dev = &fgdev->pdev->dev;
	unsigned long flags;
	u32 tmp;

	dev_dbg(dev, "disable transmission\n");

	/* guarantee that no more interrupts will be serviced */
	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	fgdev->transmission_enabled = 0;

	b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);

	/*
	 * Read the DMA status once more for debugging; anything still
	 * pending will be ignored from now on.
	 */
	tmp = b3dfg_read32(fgdev, B3D_REG_DMA_STS);
	dev_dbg(dev, "DMA_STS reads %x after TX stopped\n", tmp);

	dequeue_all_buffers(fgdev);
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	wake_up_interruptible(&fgdev->buffer_waitqueue);
}

static int set_transmission(struct b3dfg_dev *fgdev, int enabled)
{
	int res = 0;

	if (enabled && !fgdev->transmission_enabled)
		res = enable_transmission(fgdev);
	else if (!enabled && fgdev->transmission_enabled)
		disable_transmission(fgdev);

	return res;
}

/* Called from interrupt context when the wand has been unplugged. */
static void handle_cstate_unplug(struct b3dfg_dev *fgdev)
{
	/* disable all interrupts */
	b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);

	/* stop transmission and tear down any in-flight DMA state */
	spin_lock(&fgdev->buffer_lock);
	fgdev->transmission_enabled = 0;

	fgdev->cur_dma_frame_idx = -1;
	fgdev->triplet_ready = 0;
	if (fgdev->cur_dma_frame_addr) {
		pci_unmap_single(fgdev->pdev, fgdev->cur_dma_frame_addr,
				 fgdev->frame_size, PCI_DMA_FROMDEVICE);
		fgdev->cur_dma_frame_addr = 0;
	}
	dequeue_all_buffers(fgdev);
	spin_unlock(&fgdev->buffer_lock);
}

/* Called from interrupt context when the cable status has changed. */
static void handle_cstate_change(struct b3dfg_dev *fgdev)
{
	u32 cstate = b3dfg_read32(fgdev, B3D_REG_WAND_STS);
	unsigned long when;
	struct device *dev = &fgdev->pdev->dev;

	dev_dbg(dev, "cable state change: %u\n", cstate);

	/*
	 * Plug events are unexpected: cable status interrupts are only
	 * enabled while transmission is active, and transmission can only
	 * be started while the wand is attached.
	 */
	if (cstate) {
		dev_warn(dev, "ignoring unexpected plug event\n");
		return;
	}
	handle_cstate_unplug(fgdev);

	/*
	 * Record the unplug in a monotonically increasing timestamp and
	 * wake all sleepers; a waiter that went to sleep before this
	 * event will see the timestamp change and abort with -EINVAL.
	 */
	spin_lock(&fgdev->cstate_lock);
	when = jiffies;
	if (when <= fgdev->cstate_tstamp)
		when = fgdev->cstate_tstamp + 1;
	fgdev->cstate_tstamp = when;
	wake_up_interruptible(&fgdev->buffer_waitqueue);
	spin_unlock(&fgdev->cstate_lock);
}

/*
 * Called from interrupt context with buffer_lock held when a frame
 * transfer has completed.
 */
static void transfer_complete(struct b3dfg_dev *fgdev)
{
	struct b3dfg_buffer *buf;
	struct device *dev = &fgdev->pdev->dev;

	pci_unmap_single(fgdev->pdev, fgdev->cur_dma_frame_addr,
			 fgdev->frame_size, PCI_DMA_FROMDEVICE);
	fgdev->cur_dma_frame_addr = 0;

	buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);

	dev_dbg(dev, "handle frame completion\n");
	if (fgdev->cur_dma_frame_idx == B3DFG_FRAMES_PER_BUFFER - 1) {
		/* last frame of the triplet: the buffer is now complete */
		dev_dbg(dev, "triplet completed\n");
		buf->state = B3DFG_BUFFER_POPULATED;
		list_del_init(&buf->list);
		wake_up_interruptible(&fgdev->buffer_waitqueue);
	}
}

/*
 * Program the frame transfer that the hardware reports pending. Called
 * from interrupt context with buffer_lock held. Returns true if the
 * caller still needs to acknowledge the interrupt: a successful
 * setup_frame_transfer() writes the DMA status register itself, which
 * doubles as the acknowledgement.
 */
static bool setup_next_frame_transfer(struct b3dfg_dev *fgdev, int idx)
{
	struct b3dfg_buffer *buf;
	struct device *dev = &fgdev->pdev->dev;
	bool need_ack = true;

	dev_dbg(dev, "program DMA transfer for next frame: %d\n", idx);

	buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);
	if (idx == fgdev->cur_dma_frame_idx + 2) {
		if (setup_frame_transfer(fgdev, buf, idx - 1))
			dev_err(dev, "unable to map DMA buffer\n");
		else
			need_ack = false;
	} else {
		dev_err(dev, "frame mismatch, got %d, expected %d\n",
			idx, fgdev->cur_dma_frame_idx + 2);

		/* no recovery attempted; leave need_ack set so the caller
		 * still acknowledges the interrupt */
	}

	return need_ack;
}

static irqreturn_t b3dfg_intr(int irq, void *dev_id)
{
	struct b3dfg_dev *fgdev = dev_id;
	struct device *dev = &fgdev->pdev->dev;
	u32 sts;
	u8 dropped;
	bool need_ack = true;
	irqreturn_t res = IRQ_HANDLED;

	sts = b3dfg_read32(fgdev, B3D_REG_DMA_STS);
	if (unlikely(sts == 0)) {
		dev_warn(dev, "ignore interrupt, DMA status is 0\n");
		res = IRQ_NONE;
		goto out;
	}

	if (unlikely(!fgdev->transmission_enabled)) {
		dev_warn(dev, "ignore interrupt, TX disabled\n");
		res = IRQ_HANDLED;
		goto out;
	}

	/* bits 15:8 count the triplets dropped since the last read */
	dropped = (sts >> 8) & 0xff;
	dev_dbg(dev, "intr: DMA_STS=%08x (drop=%d comp=%d next=%d)\n",
		sts, dropped, !!(sts & 0x4), sts & 0x3);
	if (unlikely(dropped > 0)) {
		spin_lock(&fgdev->triplets_dropped_lock);
		fgdev->triplets_dropped += dropped;
		spin_unlock(&fgdev->triplets_dropped_lock);
	}

	/* has the cable status changed? */
	if (sts & 0x08) {
		handle_cstate_change(fgdev);
		goto out;
	}

	spin_lock(&fgdev->buffer_lock);
	if (unlikely(list_empty(&fgdev->buffer_queue))) {
		/*
		 * No buffer is queued: remember that a triplet is waiting
		 * so that the next queue_buffer() can start the transfer.
		 */
		dev_info(dev, "buffer not ready for next transfer\n");
		fgdev->triplet_ready = 1;
		goto out_unlock;
	}

	/* has a frame transfer completed? */
	if (sts & 0x4) {
		u32 dma_status = b3dfg_read32(fgdev, B3D_REG_EC220_DMA_STS);

		/* check the DMA engine's error bit */
		if (unlikely(dma_status & 0x1)) {
			dev_err(dev, "EC220 error: %08x\n", dma_status);

			/* no recovery attempted; the interrupt is still
			 * acknowledged below */
			goto out_unlock;
		}

		/* should not happen: completion with no transfer in flight */
		if (unlikely(fgdev->cur_dma_frame_idx == -1)) {
			dev_err(dev, "completed but no last idx?\n");

			/* no recovery attempted; the interrupt is still
			 * acknowledged below */
			goto out_unlock;
		}

		transfer_complete(fgdev);
	}

	/* is another frame transfer pending? (1-based frame ID, 0 = none) */
	if (sts & 0x3)
		need_ack = setup_next_frame_transfer(fgdev, sts & 0x3);
	else
		fgdev->cur_dma_frame_idx = -1;

out_unlock:
	spin_unlock(&fgdev->buffer_lock);
out:
	if (need_ack) {
		dev_dbg(dev, "acknowledging interrupt\n");
		b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0x0b);
	}
	return res;
}

static int b3dfg_open(struct inode *inode, struct file *filp)
{
	struct b3dfg_dev *fgdev =
		container_of(inode->i_cdev, struct b3dfg_dev, chardev);

	dev_dbg(&fgdev->pdev->dev, "open\n");
	filp->private_data = fgdev;
	return 0;
}

static int b3dfg_release(struct inode *inode, struct file *filp)
{
	struct b3dfg_dev *fgdev = filp->private_data;
	dev_dbg(&fgdev->pdev->dev, "release\n");
	disable_transmission(fgdev);
	return 0;
}

static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct b3dfg_dev *fgdev = filp->private_data;

	switch (cmd) {
	case B3DFG_IOCGFRMSZ:
		return __put_user(fgdev->frame_size, (int __user *) arg);
	case B3DFG_IOCGWANDSTAT:
		return get_wand_status(fgdev, (int __user *) arg);
	case B3DFG_IOCTTRANS:
		return set_transmission(fgdev, (int) arg);
	case B3DFG_IOCTQUEUEBUF:
		return queue_buffer(fgdev, (int) arg);
	case B3DFG_IOCTPOLLBUF:
		return poll_buffer(fgdev, (void __user *) arg);
	case B3DFG_IOCTWAITBUF:
		return wait_buffer(fgdev, (void __user *) arg);
	default:
		dev_dbg(&fgdev->pdev->dev, "unrecognised ioctl %x\n", cmd);
		return -EINVAL;
	}
}

static unsigned int b3dfg_poll(struct file *filp, poll_table *poll_table)
{
	struct b3dfg_dev *fgdev = filp->private_data;
	unsigned long flags, when;
	int i;
	int r = 0;

	when = get_cstate_change(fgdev);
	poll_wait(filp, &fgdev->buffer_waitqueue, poll_table);

	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	for (i = 0; i < b3dfg_nbuf; i++) {
		if (fgdev->buffers[i].state == B3DFG_BUFFER_POPULATED) {
			r = POLLIN | POLLRDNORM;
			break;
		}
	}
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	/* a cable event or disabled transmission is reported as an error */
	if (!fgdev->transmission_enabled || when != get_cstate_change(fgdev))
		r = POLLERR;

	return r;
}
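
/*
 * Illustrative userspace use of poll(2) on this device (not part of the
 * driver); per the handler above, POLLERR signals a cable unplug or
 * disabled transmission:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLERR)
 *		handle the unplug or disabled transmission;
 *	else if (pfd.revents & POLLIN)
 *		collect a POPULATED buffer with B3DFG_IOCTPOLLBUF;
 */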

static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct b3dfg_dev *fgdev = filp->private_data;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long vsize = vma->vm_end - vma->vm_start;
	unsigned long bufdatalen = b3dfg_nbuf * fgdev->frame_size *
				   B3DFG_FRAMES_PER_BUFFER;
	int r = 0;

	/* refuse mappings that extend past the end of the buffer data */
	if (offset < bufdatalen && vsize <= bufdatalen - offset) {
		vma->vm_flags |= VM_IO | VM_RESERVED | VM_CAN_NONLINEAR |
				 VM_PFNMAP;
		vma->vm_ops = &b3dfg_vm_ops;
	} else {
		r = -EINVAL;
	}

	return r;
}
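
/*
 * Illustrative userspace mapping (not part of the driver): the buffers
 * are exposed as one contiguous region of nbufs * 3 * frame_size bytes,
 * so frame f of buffer b starts at offset (b * 3 + f) * frame_size:
 *
 *	size_t len = nbufs * 3 * frame_size;
 *	unsigned char *map = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	unsigned char *frame = map + (b * 3 + f) * frame_size;
 *
 * Pages are faulted in on demand by b3dfg_vma_fault() above.
 */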

static const struct file_operations b3dfg_fops = {
	.owner = THIS_MODULE,
	.open = b3dfg_open,
	.release = b3dfg_release,
	.unlocked_ioctl = b3dfg_ioctl,
	.poll = b3dfg_poll,
	.mmap = b3dfg_mmap,
};

static void free_all_frame_buffers(struct b3dfg_dev *fgdev)
{
	int i, j;
	for (i = 0; i < b3dfg_nbuf; i++)
		for (j = 0; j < B3DFG_FRAMES_PER_BUFFER; j++)
			kfree(fgdev->buffers[i].frame[j]);
	kfree(fgdev->buffers);
}

/*
 * Initialize the device and allocate its frame buffers. Called before
 * the interrupt handler is registered.
 */
static int b3dfg_init_dev(struct b3dfg_dev *fgdev)
{
	int i, j;
	u32 frm_size = b3dfg_read32(fgdev, B3D_REG_FRM_SIZE);

	/*
	 * Disable interrupts: the hardware may still have them enabled
	 * from a previous run, and we have no handler registered yet.
	 */
	b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);

	/* the frame size register counts 4 kB pages */
	fgdev->frame_size = frm_size * 4096;
	fgdev->buffers = kcalloc(b3dfg_nbuf, sizeof(struct b3dfg_buffer),
				 GFP_KERNEL);
	if (!fgdev->buffers)
		goto err_no_buf;
	for (i = 0; i < b3dfg_nbuf; i++) {
		struct b3dfg_buffer *buf = &fgdev->buffers[i];
		for (j = 0; j < B3DFG_FRAMES_PER_BUFFER; j++) {
			buf->frame[j] = kmalloc(fgdev->frame_size, GFP_KERNEL);
			if (!buf->frame[j])
				goto err_no_mem;
		}
		INIT_LIST_HEAD(&buf->list);
	}

	INIT_LIST_HEAD(&fgdev->buffer_queue);
	init_waitqueue_head(&fgdev->buffer_waitqueue);
	spin_lock_init(&fgdev->buffer_lock);
	spin_lock_init(&fgdev->cstate_lock);
	spin_lock_init(&fgdev->triplets_dropped_lock);
	return 0;

err_no_mem:
	free_all_frame_buffers(fgdev);
err_no_buf:
	return -ENOMEM;
}

/* find the next free minor number, or -1 if none are available */
static int get_free_minor(void)
{
	int i;
	for (i = 0; i < B3DFG_MAX_DEVS; i++) {
		if (b3dfg_devices[i] == 0)
			return i;
	}
	return -1;
}

static int __devinit b3dfg_probe(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	struct b3dfg_dev *fgdev = kzalloc(sizeof(*fgdev), GFP_KERNEL);
	int r = 0;
	int minor = get_free_minor();
	dev_t devno = MKDEV(MAJOR(b3dfg_devt), minor);
	unsigned long res_len;
	resource_size_t res_base;

	if (fgdev == NULL)
		return -ENOMEM;

	if (minor < 0) {
		dev_err(&pdev->dev, "too many devices found!\n");
		r = -EIO;
		goto err_free;
	}

	b3dfg_devices[minor] = 1;
	dev_info(&pdev->dev, "probe device with IRQ %d\n", pdev->irq);

	cdev_init(&fgdev->chardev, &b3dfg_fops);
	fgdev->chardev.owner = THIS_MODULE;

	r = cdev_add(&fgdev->chardev, devno, 1);
	if (r) {
		dev_err(&pdev->dev, "cannot add char device\n");
		goto err_release_minor;
	}

	fgdev->dev = device_create(
		b3dfg_class,
		&pdev->dev,
		devno,
		dev_get_drvdata(&pdev->dev),
		DRIVER_NAME "%d", minor);

	if (IS_ERR(fgdev->dev)) {
		dev_err(&pdev->dev, "cannot create device\n");
		r = PTR_ERR(fgdev->dev);
		goto err_del_cdev;
	}

	r = pci_enable_device(pdev);
	if (r) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_dev_unreg;
	}

	res_len = pci_resource_len(pdev, B3DFG_BAR_REGS);
	if (res_len != B3DFG_REGS_LENGTH) {
		dev_err(&pdev->dev, "invalid register resource size\n");
		r = -EIO;
		goto err_disable;
	}

	if (pci_resource_flags(pdev, B3DFG_BAR_REGS)
			!= (IORESOURCE_MEM | IORESOURCE_SIZEALIGN)) {
		dev_err(&pdev->dev, "invalid resource flags\n");
		r = -EIO;
		goto err_disable;
	}
	r = pci_request_regions(pdev, DRIVER_NAME);
	if (r) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	r = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (r) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_free_res;
	}

	res_base = pci_resource_start(pdev, B3DFG_BAR_REGS);
	fgdev->regs = ioremap_nocache(res_base, res_len);
	if (!fgdev->regs) {
		dev_err(&pdev->dev, "regs ioremap failed\n");
		r = -EIO;
		goto err_free_res;
	}

	fgdev->pdev = pdev;
	pci_set_drvdata(pdev, fgdev);
	r = b3dfg_init_dev(fgdev);
	if (r < 0) {
		dev_err(&pdev->dev, "failed to initialize device\n");
		goto err_unmap;
	}

	r = request_irq(pdev->irq, b3dfg_intr, IRQF_SHARED, DRIVER_NAME, fgdev);
	if (r) {
		dev_err(&pdev->dev, "couldn't request irq %d\n", pdev->irq);
		goto err_free_bufs;
	}

	return 0;

err_free_bufs:
	free_all_frame_buffers(fgdev);
err_unmap:
	iounmap(fgdev->regs);
err_free_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err_dev_unreg:
	device_destroy(b3dfg_class, devno);
err_del_cdev:
	cdev_del(&fgdev->chardev);
err_release_minor:
	b3dfg_devices[minor] = 0;
err_free:
	kfree(fgdev);
	return r;
}

static void __devexit b3dfg_remove(struct pci_dev *pdev)
{
	struct b3dfg_dev *fgdev = pci_get_drvdata(pdev);
	unsigned int minor = MINOR(fgdev->chardev.dev);

	dev_dbg(&pdev->dev, "remove\n");

	free_irq(pdev->irq, fgdev);
	iounmap(fgdev->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	device_destroy(b3dfg_class, MKDEV(MAJOR(b3dfg_devt), minor));
	cdev_del(&fgdev->chardev);
	free_all_frame_buffers(fgdev);
	kfree(fgdev);
	b3dfg_devices[minor] = 0;
}

static struct pci_driver b3dfg_driver = {
	.name = DRIVER_NAME,
	.id_table = b3dfg_ids,
	.probe = b3dfg_probe,
	.remove = __devexit_p(b3dfg_remove),
};

static int __init b3dfg_module_init(void)
{
	int r;

	if (b3dfg_nbuf < 2) {
		printk(KERN_ERR DRIVER_NAME
			": buffer_count is out of range (must be >= 2)\n");
		return -EINVAL;
	}

	printk(KERN_INFO DRIVER_NAME ": loaded\n");

	b3dfg_class = class_create(THIS_MODULE, DRIVER_NAME);
	if (IS_ERR(b3dfg_class))
		return PTR_ERR(b3dfg_class);

	r = alloc_chrdev_region(&b3dfg_devt, 0, B3DFG_MAX_DEVS, DRIVER_NAME);
	if (r)
		goto err1;

	r = pci_register_driver(&b3dfg_driver);
	if (r)
		goto err2;

	return r;

err2:
	unregister_chrdev_region(b3dfg_devt, B3DFG_MAX_DEVS);
err1:
	class_destroy(b3dfg_class);
	return r;
}

static void __exit b3dfg_module_exit(void)
{
	printk(KERN_INFO DRIVER_NAME ": unloaded\n");
	pci_unregister_driver(&b3dfg_driver);
	unregister_chrdev_region(b3dfg_devt, B3DFG_MAX_DEVS);
	class_destroy(b3dfg_class);
}

module_init(b3dfg_module_init);
module_exit(b3dfg_module_exit);