1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/init.h>
23#include <linux/list.h>
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/kmod.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/interrupt.h>
30#include <linux/delay.h>
31#include <asm/div64.h>
32
33#include "cx23885.h"
34#include "cimax2.h"
35#include "cx23888-ir.h"
36#include "cx23885-ir.h"
37#include "cx23885-av.h"
38#include "cx23885-input.h"
39
/* Module metadata exported via modinfo. */
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");

/* Debug verbosity: 0 = silent; higher values enable dprintk() levels. */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/* Per-device board type override; UNSET means autodetect from PCI subsystem IDs. */
static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
51
/*
 * Emit a debug message when the module 'debug' level is >= 'level'.
 * Requires a 'struct cx23885_dev *dev' in the calling scope (used for
 * the "%s/0:" message prefix).
 */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
	} while (0)

/* Number of cx23885 devices probed so far; used to name each instance. */
static unsigned int cx23885_devcount;

/* Sentinel for cx23885_risc_field(): suppress the RISC_RESYNC instruction. */
#define NO_SYNC_LINE (-1U)
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
/*
 * SRAM channel map for the CX23885 bridge: offsets of the CMDS block,
 * instruction queue (ctrl), CDT and FIFO inside on-chip SRAM, plus the
 * matching DMA pointer/count registers for each channel.
 *
 * Entries named "chN" with all-zero offsets are unused placeholders;
 * cx23885_sram_channel_setup() erases any channel whose cmds_start is 0.
 */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	/* NOTE(review): TS2 shares the DMA5 register set with SRAM_CH05 in
	 * this table — appears intentional but confirm against datasheet. */
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "ch7",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
191
/*
 * SRAM channel map for the CX23887/8 bridge.  Same layout rules as
 * cx23885_sram_channels above, but with different SRAM offsets for the
 * active channels (VID A, TS1 B, TS2 C); all-zero entries are unused.
 */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	/* NOTE(review): TS2 shares the DMA5 register set with SRAM_CH05 in
	 * this table — appears intentional but confirm against datasheet. */
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "ch7",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
302
303void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
304{
305 unsigned long flags;
306 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
307
308 dev->pci_irqmask |= mask;
309
310 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
311}
312
313void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
314{
315 unsigned long flags;
316 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
317
318 dev->pci_irqmask |= mask;
319 cx_set(PCI_INT_MSK, mask);
320
321 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
322}
323
324void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
325{
326 u32 v;
327 unsigned long flags;
328 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
329
330 v = mask & dev->pci_irqmask;
331 if (v)
332 cx_set(PCI_INT_MSK, v);
333
334 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
335}
336
/* Enable every interrupt bit currently registered in dev->pci_irqmask. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, ~0U);
}
341
342void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
343{
344 unsigned long flags;
345 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
346
347 cx_clear(PCI_INT_MSK, mask);
348
349 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
350}
351
/* Disable every interrupt source in hardware (cached mask unchanged). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, ~0U);
}
356
357void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
358{
359 unsigned long flags;
360 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
361
362 dev->pci_irqmask &= ~mask;
363 cx_clear(PCI_INT_MSK, mask);
364
365 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
366}
367
368static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
369{
370 u32 v;
371 unsigned long flags;
372 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
373
374 v = cx_read(PCI_INT_MSK);
375
376 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
377 return v;
378}
379
/*
 * Decode and print one RISC instruction word (no trailing KERN_ level:
 * callers print a prefix first and this continues that line).
 *
 * Returns the instruction length in 32-bit words (minimum 1) so callers
 * can step through a RISC program.
 */
static int cx23885_risc_decode(u32 risc)
{
	/* Opcode mnemonics, indexed by the top 4 bits of the word. */
	static char *instr[16] = {
		[RISC_SYNC    >> 28] = "sync",
		[RISC_WRITE   >> 28] = "write",
		[RISC_WRITEC  >> 28] = "writec",
		[RISC_READ    >> 28] = "read",
		[RISC_READC   >> 28] = "readc",
		[RISC_JUMP    >> 28] = "jump",
		[RISC_SKIP    >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	/* Per-opcode instruction length in dwords (0 entries fall back to 1). */
	static int incr[16] = {
		[RISC_WRITE   >> 28] = 3,
		[RISC_JUMP    >> 28] = 3,
		[RISC_SKIP    >> 28] = 1,
		[RISC_SYNC    >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	/* Names for flag bits 12..27, listed from bit 12 upward. */
	static char *bits[] = {
		"12",   "13",   "14",   "resync",
		"cnt0", "cnt1", "18",   "19",
		"20",   "21",   "22",   "23",
		"irq1", "irq2", "eol",  "sol",
	};
	int i;

	printk("0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	/* Print set flag bits, highest (sol) first. */
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			printk(" %s", bits[i]);
	/* Low 12 bits hold the byte count. */
	printk(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
419
/*
 * Complete finished buffers at the head of a TS port's active queue.
 *
 * @count is the hardware general-purpose counter value for the port;
 * buffers whose ->count has been passed are marked VIDEOBUF_DONE,
 * removed from the queue and their waiters woken.  The queue's stall
 * timer is re-armed if buffers remain, cancelled otherwise.
 *
 * Caller is expected to hold the port's slock (not verifiable from this
 * file alone — NOTE(review): confirm against the interrupt handler).
 */
void cx23885_wakeup(struct cx23885_tsport *port,
		    struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;
	int bc;

	for (bc = 0;; bc++) {
		if (list_empty(&q->active))
			break;
		buf = list_entry(q->active.next,
			struct cx23885_buffer, vb.queue);

		/* The s16 cast makes the "has this buffer completed?"
		 * comparison robust against counter wrap-around. */
		if ((s16) (count - buf->count) < 0)
			break;

		do_gettimeofday(&buf->vb.ts);
		dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
			count, buf->count);
		buf->vb.state = VIDEOBUF_DONE;
		list_del(&buf->vb.queue);
		wake_up(&buf->vb.done);
	}
	/* Re-arm or cancel the stall timeout depending on remaining work. */
	if (list_empty(&q->active))
		del_timer(&q->timeout);
	else
		mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
	/* Normally exactly one buffer completes per interrupt. */
	if (bc != 1)
		printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
			__func__, bc);
}
454
/*
 * Program (or erase) one on-chip SRAM DMA channel.
 *
 * @bpl:  bytes per line; rounded up here to a multiple of 8.
 * @risc: bus address of the RISC program the channel should start at
 *        (ignored when ch->jumponly is set).
 *
 * A channel whose cmds_start is 0 is treated as unused: its DMA
 * pointer/count registers are zeroed and nothing else is written.
 * Always returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	/* Round bpl to an 8-byte multiple and size the FIFO in lines;
	 * the CDT written below supports at most 6 entries. */
	bpl   = (bpl + 7) & ~7;
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	/* Tiny self-looping RISC program at SRAM offset 8, used as the
	 * initial program when ch->jumponly is set. */
	cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	cx_write(8 + 4, 8);
	cx_write(8 + 8, 0);

	/* Write the CDT: one 16-byte descriptor per FIFO line. */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* Write the CMDS block: initial RISC PC, CDT base/size, and the
	 * instruction-queue base/size (IQ size in dwords, 64 bytes). */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0);
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	/* Zero the remainder of the CMDS block. */
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* Finally program the DMA pointer/count registers. */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
526
/*
 * Dump an SRAM channel's state to the kernel log: the CMDS block fields,
 * the four RISC instruction slots that follow them, the instruction
 * queue contents, and the channel's DMA pointer/count registers.
 * Debug aid only; has no side effects on the hardware.
 */
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	/* Names of the dwords in the CMDS block, in register order. */
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
	       dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		printk(KERN_WARNING "%s:   cmds: %-15s: 0x%08x\n",
		       dev->name, name[i],
		       cx_read(ch->cmds_start + 4*i));

	/* The four instruction slots immediately after the CMDS fields. */
	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		printk(KERN_WARNING "%s:   risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	/* Walk the 64-byte instruction queue, decoding each instruction;
	 * n is the decoded instruction length in dwords. */
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);

		printk(KERN_WARNING "%s:   (0x%08x) iq %x: ", dev->name,
		       ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			printk(KERN_WARNING "%s:   iq %x: 0x%08x [ arg #%d ]\n",
			       dev->name, i+j, risc, j);
		}
	}

	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk(KERN_WARNING "%s:   ptr1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr1_reg));
	printk(KERN_WARNING "%s:   ptr2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr2_reg));
	printk(KERN_WARNING "%s:   cnt1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt1_reg));
	printk(KERN_WARNING "%s:   cnt2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt2_reg));
}
588
/*
 * Disassemble a RISC program held in host memory (btcx_riscmem) to the
 * kernel log.  Stops at the first bare RISC_JUMP instruction or at the
 * end of the allocation, whichever comes first.  Debug aid only.
 */
static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct btcx_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
	       dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		printk(KERN_INFO "%s:   %04d: ", dev->name, i);
		/* n = decoded instruction length in dwords. */
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			printk(KERN_INFO "%s:   %04d: 0x%08x [ arg #%d ]\n",
			       dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}
607
/*
 * Quiesce the chip: stop the RISC controller, all DMA engines and the
 * UART, then disable every interrupt source.  Called from reset and on
 * teardown paths.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* Disable the RISC controller. */
	cx_write(DEV_CNTRL2, 0);

	/* Stop the IR receiver/transmitter. */
	cx_write(IR_CNTRL_REG, 0);

	/* Stop video DMA on all three video channels. */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Stop internal and external audio DMA. */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Stop the UART. */
	cx_write(UART_CTL, 0);

	/* Disable the PCI interrupt mask and every per-block mask. */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
637
/*
 * Full chip reset: shut everything down, acknowledge all pending
 * interrupt status, reprogram clock/pad setup, then rebuild every SRAM
 * channel from dev->sram_channels and reinitialise the GPIOs.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Acknowledge (clear) all latched interrupt status bits. */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	/* NOTE(review): CLK_DELAY keeps only its top bit; PAD_CTRL value
	 * comes from vendor init — confirm against datasheet. */
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* Allow the hardware to settle before reprogramming SRAM. */
	mdelay(100);

	/* VID A gets 720 pixels * 4 bytes; TS channels 188 * 4; the rest
	 * are placeholders erased with a minimal bpl. */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
670
671
/*
 * Apply bridge-specific PCIe workarounds.  Always returns 0.
 */
static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* On the original 885 bridge only, clear bit 4 of RDR_TLCTL0.
	 * NOTE(review): the precise meaning of this bit is not visible
	 * from this file — confirm against the vendor datasheet. */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	return 0;
}
685
686static int get_resources(struct cx23885_dev *dev)
687{
688 if (request_mem_region(pci_resource_start(dev->pci, 0),
689 pci_resource_len(dev->pci, 0),
690 dev->name))
691 return 0;
692
693 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
694 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
695
696 return -EBUSY;
697}
698
699static void cx23885_timeout(unsigned long data);
700int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
701 u32 reg, u32 mask, u32 value);
702
/*
 * Initialise one MPEG transport-stream port (portno 1 = VID B,
 * portno 2 = VID C): default register values, buffer queues, stall
 * timer, frontend list, per-port register map and the RISC "stopper"
 * program used to halt DMA.  Returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Default register values for serial TS input; boards may
	 * override these later in card setup. */
	port->dma_ctl_val        = 0x11;
	port->ts_int_msk_val     = 0x1111;
	port->vld_misc_val       = 0x0;
	/* Sync byte 0x47, packet length 188 bytes. */
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	INIT_LIST_HEAD(&port->mpegq.queued);
	port->mpegq.timeout.function = cx23885_timeout;
	port->mpegq.timeout.data = (unsigned long)port;
	init_timer(&port->mpegq.timeout);

	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* Default to a single frontend unless the board config already
	 * requested more. */
	if (!port->num_frontends)
		port->num_frontends = 1;

	/* Bind the port to its hardware register block and SRAM channel. */
	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* TS1 */
		port->pci_irqmask        = 0x02; /* TS port 1 bit */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0; /* VID C has no source select */
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* TS2 */
		port->pci_irqmask        = 0x04; /* TS port 2 bit */
		break;
	default:
		BUG();
	}

	/* Build the RISC program that stops this port's DMA engine. */
	cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
		     port->reg_dma_ctl, port->dma_ctl_val, 0x00);

	return 0;
}
781
782static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
783{
784 switch (cx_read(RDR_CFG2) & 0xff) {
785 case 0x00:
786
787 dev->hwrevision = 0xa0;
788 break;
789 case 0x01:
790
791 dev->hwrevision = 0xa1;
792 break;
793 case 0x02:
794
795 dev->hwrevision = 0xb0;
796 break;
797 case 0x03:
798 if (dev->pci->device == 0x8880) {
799
800 dev->hwrevision = 0xc0;
801 } else {
802
803 dev->hwrevision = 0xa4;
804 }
805 break;
806 case 0x04:
807 if (dev->pci->device == 0x8880) {
808
809 dev->hwrevision = 0xd0;
810 } else {
811
812 dev->hwrevision = 0xa5;
813 }
814 break;
815 case 0x0e:
816
817 dev->hwrevision = 0xc0;
818 break;
819 case 0x0f:
820
821 dev->hwrevision = 0xb1;
822 break;
823 default:
824 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
825 __func__, dev->hwrevision);
826 }
827 if (dev->hwrevision)
828 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
829 __func__, dev->hwrevision);
830 else
831 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
832 __func__, dev->hwrevision);
833}
834
835
836struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
837{
838 struct v4l2_subdev *result = NULL;
839 struct v4l2_subdev *sd;
840
841 spin_lock(&dev->v4l2_dev.lock);
842 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
843 if (sd->grp_id == hw) {
844 result = sd;
845 break;
846 }
847 }
848 spin_unlock(&dev->v4l2_dev.lock);
849 return result;
850}
851
852static int cx23885_dev_setup(struct cx23885_dev *dev)
853{
854 int i;
855
856 spin_lock_init(&dev->pci_irqmask_lock);
857
858 mutex_init(&dev->lock);
859 mutex_init(&dev->gpio_lock);
860
861 atomic_inc(&dev->refcount);
862
863 dev->nr = cx23885_devcount++;
864 sprintf(dev->name, "cx23885[%d]", dev->nr);
865
866
867 if (dev->pci->device == 0x8880) {
868
869 dev->bridge = CX23885_BRIDGE_887;
870
871 dev->clk_freq = 25000000;
872 dev->sram_channels = cx23887_sram_channels;
873 } else
874 if (dev->pci->device == 0x8852) {
875 dev->bridge = CX23885_BRIDGE_885;
876
877 dev->clk_freq = 28000000;
878 dev->sram_channels = cx23885_sram_channels;
879 } else
880 BUG();
881
882 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
883 __func__, dev->bridge);
884
885
886 dev->board = UNSET;
887 if (card[dev->nr] < cx23885_bcount)
888 dev->board = card[dev->nr];
889 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
890 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
891 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
892 dev->board = cx23885_subids[i].card;
893 if (UNSET == dev->board) {
894 dev->board = CX23885_BOARD_UNKNOWN;
895 cx23885_card_list(dev);
896 }
897
898
899 if (cx23885_boards[dev->board].clk_freq > 0)
900 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
901
902 dev->pci_bus = dev->pci->bus->number;
903 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
904 cx23885_irq_add(dev, 0x001f00);
905 if (cx23885_boards[dev->board].cimax > 0)
906 cx23885_irq_add(dev, 0x01800000);
907
908
909 dev->i2c_bus[0].nr = 0;
910 dev->i2c_bus[0].dev = dev;
911 dev->i2c_bus[0].reg_stat = I2C1_STAT;
912 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
913 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
914 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
915 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
916 dev->i2c_bus[0].i2c_period = (0x9d << 24);
917
918
919 dev->i2c_bus[1].nr = 1;
920 dev->i2c_bus[1].dev = dev;
921 dev->i2c_bus[1].reg_stat = I2C2_STAT;
922 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
923 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
924 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
925 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
926 dev->i2c_bus[1].i2c_period = (0x9d << 24);
927
928
929 dev->i2c_bus[2].nr = 2;
930 dev->i2c_bus[2].dev = dev;
931 dev->i2c_bus[2].reg_stat = I2C3_STAT;
932 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
933 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
934 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
935 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
936 dev->i2c_bus[2].i2c_period = (0x07 << 24);
937
938 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
939 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
940 cx23885_init_tsport(dev, &dev->ts1, 1);
941
942 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
943 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
944 cx23885_init_tsport(dev, &dev->ts2, 2);
945
946 if (get_resources(dev) < 0) {
947 printk(KERN_ERR "CORE %s No more PCIe resources for "
948 "subsystem: %04x:%04x\n",
949 dev->name, dev->pci->subsystem_vendor,
950 dev->pci->subsystem_device);
951
952 cx23885_devcount--;
953 return -ENODEV;
954 }
955
956
957 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
958 pci_resource_len(dev->pci, 0));
959
960 dev->bmmio = (u8 __iomem *)dev->lmmio;
961
962 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
963 dev->name, dev->pci->subsystem_vendor,
964 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
965 dev->board, card[dev->nr] == dev->board ?
966 "insmod option" : "autodetected");
967
968 cx23885_pci_quirks(dev);
969
970
971 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
972 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
973 dev->radio_type = cx23885_boards[dev->board].radio_type;
974 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
975
976 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x\n",
977 __func__, dev->tuner_type, dev->tuner_addr);
978 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
979 __func__, dev->radio_type, dev->radio_addr);
980
981
982
983
984
985 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
986 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
987 cx23885_mc417_init(dev);
988
989
990 cx23885_reset(dev);
991
992 cx23885_i2c_register(&dev->i2c_bus[0]);
993 cx23885_i2c_register(&dev->i2c_bus[1]);
994 cx23885_i2c_register(&dev->i2c_bus[2]);
995 cx23885_card_setup(dev);
996 call_all(dev, core, s_power, 0);
997 cx23885_ir_init(dev);
998
999 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
1000 if (cx23885_video_register(dev) < 0) {
1001 printk(KERN_ERR "%s() Failed to register analog "
1002 "video adapters on VID_A\n", __func__);
1003 }
1004 }
1005
1006 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1007 if (cx23885_dvb_register(&dev->ts1) < 0) {
1008 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
1009 __func__);
1010 }
1011 } else
1012 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1013 if (cx23885_417_register(dev) < 0) {
1014 printk(KERN_ERR
1015 "%s() Failed to register 417 on VID_B\n",
1016 __func__);
1017 }
1018 }
1019
1020 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1021 if (cx23885_dvb_register(&dev->ts2) < 0) {
1022 printk(KERN_ERR
1023 "%s() Failed to register dvb on VID_C\n",
1024 __func__);
1025 }
1026 } else
1027 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1028 if (cx23885_417_register(dev) < 0) {
1029 printk(KERN_ERR
1030 "%s() Failed to register 417 on VID_C\n",
1031 __func__);
1032 }
1033 }
1034
1035 cx23885_dev_checkrevision(dev);
1036
1037 return 0;
1038}
1039
/*
 * Tear down a device: release the MMIO region, then — only when the
 * last reference is dropped — unregister every sub-driver the board
 * registered, the i2c buses, and unmap the register window.
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	/* NOTE(review): the MMIO region is released before the refcount
	 * check, i.e. even while other references keep the device alive.
	 * This mirrors the single get_resources() in cx23885_dev_setup();
	 * confirm pairing before changing the order. */
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* Unregister i2c buses in reverse order of registration. */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1069
/*
 * Emit RISC write instructions for one video field described by a
 * scatter-gather list.
 *
 * @rp:        output cursor into the RISC program buffer.
 * @offset:    byte offset of the field's first line within the sg list.
 * @sync_line: line number for an initial RISC_RESYNC, or NO_SYNC_LINE
 *             to omit it.
 * @bpl:       bytes per line; @padding: extra bytes skipped after each
 *             line; @lines: number of lines to emit.
 *
 * Each line becomes either a single SOL|EOL write (when it fits in one
 * sg segment) or a SOL write + middle writes + EOL write spanning
 * segments.  Returns the advanced cursor.
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines)
{
	struct scatterlist *sg;
	unsigned int line, todo;

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* Advance to the sg segment containing 'offset'. */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg++;
		}
		if (bpl <= sg_dma_len(sg)-offset) {
			/* Whole line fits in the current segment. */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* Line crosses segment boundaries: SOL chunk,
			 * full middle segments, then the EOL remainder. */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg++;
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg++;
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1123
/*
 * Build a RISC program transferring an interlaced video buffer (top
 * and/or bottom field) described by @sglist.  Field offsets of UNSET
 * skip that field; the bottom field uses sync line 0x200.
 *
 * Allocates the program via btcx_riscmem_alloc() and records the tail
 * position in risc->jmp (the caller appends the jump/irq trailer).
 * Returns 0 or a negative errno from the allocation.
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;
	int rc;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* Estimate the instruction count: one sync per field, one write
	 * per line plus one extra per potential page crossing, and two
	 * trailing instructions.  Each instruction takes 12 bytes. */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 2;
	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
	if (rc < 0)
		return rc;

	/* Emit the per-field write programs. */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines);

	/* Save pointer to jmp instruction address for the caller. */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1165
/*
 * Build a RISC program for a non-interlaced data buffer (e.g. a TS
 * stream): a single field with no sync instruction and no padding.
 * Records the tail position in risc->jmp for the caller's trailer.
 * Returns 0 or a negative errno from the allocation.
 */
static int cx23885_risc_databuffer(struct pci_dev *pci,
				   struct btcx_riscmem *risc,
				   struct scatterlist *sglist,
				   unsigned int bpl,
				   unsigned int lines)
{
	u32 instructions;
	__le32 *rp;
	int rc;

	/* Estimate the instruction count: one write per line plus one
	 * extra per potential page crossing, and one trailing
	 * instruction.  Each instruction takes 12 bytes. */
	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 1;

	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
	if (rc < 0)
		return rc;

	/* Emit the write program; NO_SYNC_LINE skips the sync. */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);

	/* Save pointer to jmp instruction address for the caller. */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1197
/*
 * Build the "stopper" RISC program: a writecr instruction that applies
 * (@value under @mask) to register @reg and raises IRQ2, followed by a
 * jump back to itself so the engine idles there until restarted.  Used
 * to halt a port's DMA engine.  Returns 0 or a negative errno.
 */
int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
			 u32 reg, u32 mask, u32 value)
{
	__le32 *rp;
	int rc;

	rc = btcx_riscmem_alloc(pci, risc, 4*16);
	if (rc < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	*(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
	*(rp++) = cpu_to_le32(reg);
	*(rp++) = cpu_to_le32(value);
	*(rp++) = cpu_to_le32(mask);
	/* Endless loop: jump to our own bus address. */
	*(rp++) = cpu_to_le32(RISC_JUMP);
	*(rp++) = cpu_to_le32(risc->dma);
	*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	return 0;
}
1219
/*
 * Release a video buffer: wait until it is no longer in use, unmap and
 * free its DMA scatter-gather resources and its RISC program, then mark
 * it as needing re-initialisation.  Must not run in interrupt context
 * (videobuf_waiton() sleeps).
 */
void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);

	BUG_ON(in_interrupt());
	videobuf_waiton(q, &buf->vb, 0, 0);
	videobuf_dma_unmap(q->dev, dma);
	videobuf_dma_free(dma);
	btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
1231
/*
 * Dump a TS port's full register state (global DMA/interrupt registers
 * plus every per-port register from the port's register map) at debug
 * level 1.  Debug aid only; no side effects beyond register reads.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2               0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK              0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL                 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL          0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2                    0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X)          0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X)      0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X)        0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	/* src_sel is 0 on ports without a source-select register (VID C). */
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X)        0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X)          0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X)    0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X)       0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X)  0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X)     0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X)       0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X)      0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X)     0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1283
/*
 * Program the SRAM channel, pad control and per-port registers, then
 * enable TS interrupts and DMA for this port, starting transfer into
 * 'buf'. Register write ordering and the udelay()s are deliberate.
 *
 * Returns 0 on success, -EINVAL if neither port B nor C is DVB-capable.
 */
static int cx23885_start_dma(struct cx23885_tsport *port,
			     struct cx23885_dmaqueue *q,
			     struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		buf->vb.width, buf->vb.height, buf->vb.field);

	/* Stop the FIFO/RISC engine before reprogramming it */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* Point the SRAM channel at this buffer's RISC program */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
					  &dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* Tell the chip the TS line length (one packet per line) */
	cx_write(port->reg_lngth, buf->vb.width);

	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
	    (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	/* Encoder boards: gate the AV clock off while reconfiguring */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* Configure TS source selection where the port supports it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* Reset the general purpose packet counter; queue restarts at 1 so
	 * buf->count bookkeeping lines up with the hardware counter */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 1;

	/* Port B in DVB mode: set VIDB pins to input (clear low PAD_CTRL bits) */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3;
		cx_write(PAD_CTRL, reg);
	}

	/* Port C in DVB mode: set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4;
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		/* Encoder mode: switch the pads around the other way */
		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;	/* clear bit 0 */

		/* set bits 1 and 3 — NOTE(review): exact pad semantics not
		 * documented here; verify against the CX23885 datasheet */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* Clock delay / alternate pin routing for the encoder path */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable TS interrupts and kick off DMA */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* start the RISC controller */

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	return 0;
}
1393
1394static int cx23885_stop_dma(struct cx23885_tsport *port)
1395{
1396 struct cx23885_dev *dev = port->dev;
1397 u32 reg;
1398
1399 dprintk(1, "%s()\n", __func__);
1400
1401
1402 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1403 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1404
1405 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1406
1407 reg = cx_read(PAD_CTRL);
1408
1409
1410 reg = reg | 0x1;
1411
1412
1413 reg = reg & ~0xa;
1414 cx_write(PAD_CTRL, reg);
1415 cx_write(port->reg_src_sel, 0);
1416 cx_write(port->reg_gen_ctrl, 8);
1417
1418 }
1419
1420 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1421 cx23885_av_clk(dev, 0);
1422
1423 return 0;
1424}
1425
1426int cx23885_restart_queue(struct cx23885_tsport *port,
1427 struct cx23885_dmaqueue *q)
1428{
1429 struct cx23885_dev *dev = port->dev;
1430 struct cx23885_buffer *buf;
1431
1432 dprintk(5, "%s()\n", __func__);
1433 if (list_empty(&q->active)) {
1434 struct cx23885_buffer *prev;
1435 prev = NULL;
1436
1437 dprintk(5, "%s() queue is empty\n", __func__);
1438
1439 for (;;) {
1440 if (list_empty(&q->queued))
1441 return 0;
1442 buf = list_entry(q->queued.next, struct cx23885_buffer,
1443 vb.queue);
1444 if (NULL == prev) {
1445 list_del(&buf->vb.queue);
1446 list_add_tail(&buf->vb.queue, &q->active);
1447 cx23885_start_dma(port, q, buf);
1448 buf->vb.state = VIDEOBUF_ACTIVE;
1449 buf->count = q->count++;
1450 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
1451 dprintk(5, "[%p/%d] restart_queue - f/active\n",
1452 buf, buf->vb.i);
1453
1454 } else if (prev->vb.width == buf->vb.width &&
1455 prev->vb.height == buf->vb.height &&
1456 prev->fmt == buf->fmt) {
1457 list_del(&buf->vb.queue);
1458 list_add_tail(&buf->vb.queue, &q->active);
1459 buf->vb.state = VIDEOBUF_ACTIVE;
1460 buf->count = q->count++;
1461 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1462
1463 prev->risc.jmp[2] = cpu_to_le32(0);
1464 dprintk(5, "[%p/%d] restart_queue - m/active\n",
1465 buf, buf->vb.i);
1466 } else {
1467 return 0;
1468 }
1469 prev = buf;
1470 }
1471 return 0;
1472 }
1473
1474 buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
1475 dprintk(2, "restart_queue [%p/%d]: restart dma\n",
1476 buf, buf->vb.i);
1477 cx23885_start_dma(port, q, buf);
1478 list_for_each_entry(buf, &q->active, vb.queue)
1479 buf->count = q->count++;
1480 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
1481 return 0;
1482}
1483
1484
1485
1486int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
1487 struct cx23885_buffer *buf, enum v4l2_field field)
1488{
1489 struct cx23885_dev *dev = port->dev;
1490 int size = port->ts_packet_size * port->ts_packet_count;
1491 int rc;
1492
1493 dprintk(1, "%s: %p\n", __func__, buf);
1494 if (0 != buf->vb.baddr && buf->vb.bsize < size)
1495 return -EINVAL;
1496
1497 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
1498 buf->vb.width = port->ts_packet_size;
1499 buf->vb.height = port->ts_packet_count;
1500 buf->vb.size = size;
1501 buf->vb.field = field ;
1502
1503 rc = videobuf_iolock(q, &buf->vb, NULL);
1504 if (0 != rc)
1505 goto fail;
1506 cx23885_risc_databuffer(dev->pci, &buf->risc,
1507 videobuf_to_dma(&buf->vb)->sglist,
1508 buf->vb.width, buf->vb.height);
1509 }
1510 buf->vb.state = VIDEOBUF_PREPARED;
1511 return 0;
1512
1513 fail:
1514 cx23885_free_buffer(q, buf);
1515 return rc;
1516}
1517
/*
 * Queue a prepared buffer on the active list. The buffer's RISC program
 * first gets a tail jump to the stopper; if another buffer is already
 * active, the previous buffer's jump is then re-pointed at this one so
 * DMA flows straight through without a restart. Caller holds slock.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;

	/* Terminate this buffer's program with a jump to the stopper */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 of jump target */

	if (list_empty(&cx88q->active)) {
		dprintk(1, "queue is empty - first active\n");
		list_add_tail(&buf->vb.queue, &cx88q->active);
		cx23885_start_dma(port, cx88q, buf);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count = cx88q->count++;
		mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.i, __func__);
	} else {
		dprintk(1, "queue is not empty - append to active\n");
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  vb.queue);
		list_add_tail(&buf->vb.queue, &cx88q->active);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count = cx88q->count++;
		/* Splice: previous buffer now jumps here, not to the stopper */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		prev->risc.jmp[2] = cpu_to_le32(0);
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.i, __func__);
	}
}
1551
1552
1553
/*
 * Under the port spinlock, drain the active queue: every buffer is
 * removed, marked VIDEOBUF_ERROR and its waiters woken. 'reason' is
 * only used for the debug message. If 'restart' is set, attempt to
 * restart streaming from the queued list afterwards.
 */
static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
			      int restart)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx23885_buffer,
				 vb.queue);
		list_del(&buf->vb.queue);
		buf->vb.state = VIDEOBUF_ERROR;
		wake_up(&buf->vb.done);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
	}
	if (restart) {
		dprintk(1, "restarting queue\n");
		cx23885_restart_queue(port, q);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}
1578
/*
 * Full stop for a TS port: kill the timeout timer, halt DMA, then
 * error out all active buffers without restarting the queue.
 */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;

	dprintk(1, "%s()\n", __func__);
	del_timer_sync(&q->timeout);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel", 0);
}
1589
/*
 * DMA timeout timer callback (data is the cx23885_tsport pointer).
 * Stops DMA, errors out active buffers and tries to restart streaming
 * from the queued list.
 */
static void cx23885_timeout(unsigned long data)
{
	struct cx23885_tsport *port = (struct cx23885_tsport *)data;
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);

	if (debug > 5)
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	cx23885_stop_dma(port);
	do_cancel_buffers(port, "timeout", 1);
}
1604
/*
 * Interrupt handler for the MPEG encoder (417) path. Error conditions
 * stop DMA and dump state; RISCI1 wakes up completed buffers; RISCI2
 * restarts the queue. Acks all handled status bits before returning.
 * Returns non-zero if the interrupt was handled.
 *
 * NOTE(review): hard-wired to dev->ts1 — assumes the encoder lives on
 * port B.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
	    (status & VID_B_MSK_OPC_ERR) ||
	    (status & VID_B_MSK_VBI_OPC_ERR) ||
	    (status & VID_B_MSK_SYNC) ||
	    (status & VID_B_MSK_VBI_SYNC) ||
	    (status & VID_B_MSK_OF) ||
	    (status & VID_B_MSK_VBI_OF)) {
		/* Any RISC/sync/overflow error: log the specific bits */
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, " VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, " VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, " VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, " VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, " VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, " VID_B_MSK_VBI_OF\n");

		/* Stop DMA and re-check the encoder after an error */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		/* Normal buffer-complete interrupt */
		dprintk(7, " VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	} else if (status & VID_B_MSK_RISCI2) {
		/* Stopper reached: try to restart from the queued list */
		dprintk(7, " VID_B_MSK_RISCI2\n");
		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* Ack the interrupt by writing the status bits back */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1665
/*
 * Interrupt handler for a DVB TS port. Mirrors cx23885_irq_417():
 * errors stop DMA and dump the SRAM channel; RISCI1 wakes completed
 * buffers; RISCI2 restarts the queue. All status bits are acked.
 * Returns non-zero if the interrupt was handled.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
	    (status & VID_BC_MSK_BAD_PKT) ||
	    (status & VID_BC_MSK_SYNC) ||
	    (status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		/* Error path: stop DMA and dump channel state for debugging */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		/* Buffer complete: wake waiters up to the hardware count */
		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	} else if (status & VID_BC_MSK_RISCI2) {

		/* Stopper reached: restart from the queued list */
		dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);

		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* Ack handled bits */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1724
/*
 * Top-level interrupt service routine. Reads the PCI, VID A/B/C status
 * and mask registers, then dispatches to the per-port TS/417 handlers,
 * the video handler, the IR subdevice and the deferred AV-core worker.
 * Returns IRQ_HANDLED if anything was serviced, IRQ_NONE otherwise.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* Nothing pending for us (shared IRQ line) — bail out early.
	 * NOTE(review): vida_status is not part of this early-exit test;
	 * verify whether VID A can assert without a PCI status bit. */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* Purely diagnostic: name each pending PCI status bit */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
				PCI_MSK_IR);
	}

	/* GPIO interrupt → NetUp CI slot status (boards with a CIMAX).
	 * NOTE(review): the inner cimax check repeats the outer one and is
	 * redundant. */
	if (cx23885_boards[dev->board].cimax > 0 &&
	    ((pci_status & PCI_MSK_GPIO0) ||
	     (pci_status & PCI_MSK_GPIO1))) {

		if (cx23885_boards[dev->board].cimax > 0)
			handled += netup_ci_slot_status(dev, pci_status);

	}

	/* Port B: route to the DVB or encoder handler per board config */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	/* Port C: likewise */
	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	/* IR: hand off to the IR subdevice's ISR */
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* AV core: mask the interrupt and defer to a workqueue; the work
	 * handler is responsible for re-enabling it */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		if (!schedule_work(&dev->cx25840_work))
			printk(KERN_ERR "%s: failed to set up deferred work for"
			       " AV Core/IR interrupt. Interrupt is disabled"
			       " and won't be re-enabled\n", dev->name);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1875
1876static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1877 unsigned int notification, void *arg)
1878{
1879 struct cx23885_dev *dev;
1880
1881 if (sd == NULL)
1882 return;
1883
1884 dev = to_cx23885(sd->v4l2_dev);
1885
1886 switch (notification) {
1887 case V4L2_SUBDEV_IR_RX_NOTIFY:
1888 if (sd == dev->sd_ir)
1889 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1890 break;
1891 case V4L2_SUBDEV_IR_TX_NOTIFY:
1892 if (sd == dev->sd_ir)
1893 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1894 break;
1895 }
1896}
1897
/*
 * Wire up the deferred-work handlers (AV core, IR RX/TX) and install
 * the v4l2_device notify callback. Must run before interrupts that can
 * schedule this work are enabled.
 */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1905
/* True if this board has the MPEG encoder on TS port B. */
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}
1910
/* True if this board has the MPEG encoder on TS port C. */
static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1929{
1930 if (mask & 0x7)
1931 cx_set(GP0_IO, mask & 0x7);
1932
1933 if (mask & 0x0007fff8) {
1934 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1935 printk(KERN_ERR
1936 "%s: Setting GPIO on encoder ports\n",
1937 dev->name);
1938 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1939 }
1940
1941
1942 if (mask & 0x00f80000)
1943 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1944}
1945
1946void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1947{
1948 if (mask & 0x00000007)
1949 cx_clear(GP0_IO, mask & 0x7);
1950
1951 if (mask & 0x0007fff8) {
1952 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1953 printk(KERN_ERR
1954 "%s: Clearing GPIO moving on encoder ports\n",
1955 dev->name);
1956 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1957 }
1958
1959
1960 if (mask & 0x00f80000)
1961 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1962}
1963
1964u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1965{
1966 if (mask & 0x00000007)
1967 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1968
1969 if (mask & 0x0007fff8) {
1970 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1971 printk(KERN_ERR
1972 "%s: Reading GPIO moving on encoder ports\n",
1973 dev->name);
1974 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
1975 }
1976
1977
1978 if (mask & 0x00f80000)
1979 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1980
1981 return 0;
1982}
1983
1984void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1985{
1986 if ((mask & 0x00000007) && asoutput)
1987 cx_set(GP0_IO, (mask & 0x7) << 16);
1988 else if ((mask & 0x00000007) && !asoutput)
1989 cx_clear(GP0_IO, (mask & 0x7) << 16);
1990
1991 if (mask & 0x0007fff8) {
1992 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1993 printk(KERN_ERR
1994 "%s: Enabling GPIO on encoder ports\n",
1995 dev->name);
1996 }
1997
1998
1999 if ((mask & 0x0007fff8) && asoutput)
2000 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2001
2002 else if ((mask & 0x0007fff8) && !asoutput)
2003 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2004
2005
2006}
2007
/*
 * PCI probe: allocate the device, register the v4l2_device, enable the
 * PCI function, run board-specific setup, verify 32-bit DMA, request
 * the IRQ (MSI preferred, falling back to a shared legacy line), then
 * enable board/IR interrupts and the input (remote control) layer.
 * Error paths unwind in reverse via the goto chain.
 */
static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
				     const struct pci_device_id *pci_id)
{
	struct cx23885_dev *dev;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (NULL == dev)
		return -ENOMEM;

	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
	if (err < 0)
		goto fail_free;

	/* Install work handlers and notify hook before IRQs can fire */
	cx23885_v4l2_dev_notify_init(dev);

	dev->pci = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto fail_unreg;
	}

	if (cx23885_dev_setup(dev) < 0) {
		err = -EINVAL;
		goto fail_unreg;
	}

	/* Print PCI vitals */
	pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
	       "latency: %d, mmio: 0x%llx\n", dev->name,
	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
	       dev->pci_lat,
	       (unsigned long long)pci_resource_start(pci_dev, 0));

	pci_set_master(pci_dev);
	if (!pci_dma_supported(pci_dev, 0xffffffff)) {
		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		err = -EIO;
		goto fail_irq;
	}

	/* Prefer MSI; a successful pci_enable_msi() gives us an exclusive
	 * vector, otherwise share the legacy line */
	if (!pci_enable_msi(pci_dev))
		err = request_irq(pci_dev->irq, cx23885_irq,
				  IRQF_DISABLED, dev->name, dev);
	else
		err = request_irq(pci_dev->irq, cx23885_irq,
				  IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
	if (err < 0) {
		printk(KERN_ERR "%s: can't get IRQ %d\n",
		       dev->name, pci_dev->irq);
		goto fail_irq;
	}

	/* Board-specific extra interrupt enables */
	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		cx23885_irq_add_enable(dev, 0x01800000);
		break;
	}

	/* Enable IR interrupts and register the input (RC) devices */
	cx23885_ir_pci_int_enable(dev);
	cx23885_input_init(dev);

	return 0;

fail_irq:
	cx23885_dev_unregister(dev);
fail_unreg:
	v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
	kfree(dev);
	return err;
}
2089
/*
 * PCI remove: tear down in reverse of probe — input/IR first, then
 * shut the hardware down, free the IRQ/MSI, and finally unregister
 * and free the device.
 */
static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	/* Quiesce the chip (interrupts off) before releasing the IRQ */
	cx23885_shutdown(dev);

	pci_disable_device(pci_dev);

	free_irq(pci_dev->irq, dev);
	pci_disable_msi(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2110
/* PCI IDs this driver binds to (vendor 0x14f1 = Conexant). */
static struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor = 0x14f1,
		.device = 0x8852,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* 0x8880 — presumably CX23887/8 silicon; matches the
		 * CX23885_BRIDGE_887/888 cases handled above */
		.vendor = 0x14f1,
		.device = 0x8880,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
2128MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2129
static struct pci_driver cx23885_pci_driver = {
	.name = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe = cx23885_initdev,
	.remove = __devexit_p(cx23885_finidev),
	/* TODO: power management — suspend/resume not implemented */
	.suspend = NULL,
	.resume = NULL,
};
2139
/* Module entry: announce the driver version and register with the
 * PCI core (probe fires per matching device). */
static int __init cx23885_init(void)
{
	printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
	       (CX23885_VERSION_CODE >> 16) & 0xff,
	       (CX23885_VERSION_CODE >> 8) & 0xff,
	       CX23885_VERSION_CODE & 0xff);
#ifdef SNAPSHOT
	printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
	       SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
#endif
	return pci_register_driver(&cx23885_pci_driver);
}
2152
/* Module exit: unregister the PCI driver (remove fires per device). */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2157
2158module_init(cx23885_init);
2159module_exit(cx23885_fini);
2160
2161
2162