/*
 *  Driver for the Conexant CX23885/7/8 PCIe bridge
 *
 *  Author: Steven Toth <stoth@linuxtv.org>
 */

18#include <linux/init.h>
19#include <linux/list.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kmod.h>
23#include <linux/kernel.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <asm/div64.h>
28#include <linux/firmware.h>
29
30#include "cx23885.h"
31#include "cimax2.h"
32#include "altera-ci.h"
33#include "cx23888-ir.h"
34#include "cx23885-ir.h"
35#include "cx23885-av.h"
36#include "cx23885-input.h"
37
38MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
39MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
40MODULE_LICENSE("GPL");
41MODULE_VERSION(CX23885_VERSION);
42
43static unsigned int debug;
44module_param(debug, int, 0644);
45MODULE_PARM_DESC(debug, "enable debug messages");
46
47static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
48module_param_array(card, int, NULL, 0444);
49MODULE_PARM_DESC(card, "card type");
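/*
 * Example (hypothetical values): loading the module with
 *   modprobe cx23885 debug=1 card=3
 * enables debug messages and overrides card auto-detection for the
 * first device.
 */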
50
51#define dprintk(level, fmt, arg...)\
52 do { if (debug >= level)\
53 printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
54 } while (0)
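/* Debug level convention used in this file: 1 = setup/teardown paths,
 * 2 = SRAM/RISC programming, higher levels (>4, >5) add register and
 * SRAM dumps, 7 = per-interrupt traces. */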
55
56static unsigned int cx23885_devcount;
57
58#define NO_SYNC_LINE (-1U)

/*
 * SRAM channel layout for the bridge's internal DMA engine. Each entry
 * describes where a channel's command block (CMDS), instruction queue,
 * cluster descriptor table (CDT) and data FIFO live in on-chip SRAM,
 * plus the DMA pointer/count registers that shadow that layout. The
 * entries named "chN" have no SRAM assigned and are simply erased by
 * cx23885_sram_channel_setup().
 */
80static struct sram_channel cx23885_sram_channels[] = {
81 [SRAM_CH01] = {
82 .name = "VID A",
83 .cmds_start = 0x10000,
84 .ctrl_start = 0x10380,
85 .cdt = 0x104c0,
86 .fifo_start = 0x40,
87 .fifo_size = 0x2800,
88 .ptr1_reg = DMA1_PTR1,
89 .ptr2_reg = DMA1_PTR2,
90 .cnt1_reg = DMA1_CNT1,
91 .cnt2_reg = DMA1_CNT2,
92 },
93 [SRAM_CH02] = {
94 .name = "ch2",
95 .cmds_start = 0x0,
96 .ctrl_start = 0x0,
97 .cdt = 0x0,
98 .fifo_start = 0x0,
99 .fifo_size = 0x0,
100 .ptr1_reg = DMA2_PTR1,
101 .ptr2_reg = DMA2_PTR2,
102 .cnt1_reg = DMA2_CNT1,
103 .cnt2_reg = DMA2_CNT2,
104 },
105 [SRAM_CH03] = {
106 .name = "TS1 B",
107 .cmds_start = 0x100A0,
108 .ctrl_start = 0x10400,
109 .cdt = 0x10580,
110 .fifo_start = 0x5000,
111 .fifo_size = 0x1000,
112 .ptr1_reg = DMA3_PTR1,
113 .ptr2_reg = DMA3_PTR2,
114 .cnt1_reg = DMA3_CNT1,
115 .cnt2_reg = DMA3_CNT2,
116 },
117 [SRAM_CH04] = {
118 .name = "ch4",
119 .cmds_start = 0x0,
120 .ctrl_start = 0x0,
121 .cdt = 0x0,
122 .fifo_start = 0x0,
123 .fifo_size = 0x0,
124 .ptr1_reg = DMA4_PTR1,
125 .ptr2_reg = DMA4_PTR2,
126 .cnt1_reg = DMA4_CNT1,
127 .cnt2_reg = DMA4_CNT2,
128 },
129 [SRAM_CH05] = {
130 .name = "ch5",
131 .cmds_start = 0x0,
132 .ctrl_start = 0x0,
133 .cdt = 0x0,
134 .fifo_start = 0x0,
135 .fifo_size = 0x0,
136 .ptr1_reg = DMA5_PTR1,
137 .ptr2_reg = DMA5_PTR2,
138 .cnt1_reg = DMA5_CNT1,
139 .cnt2_reg = DMA5_CNT2,
140 },
141 [SRAM_CH06] = {
142 .name = "TS2 C",
143 .cmds_start = 0x10140,
144 .ctrl_start = 0x10440,
145 .cdt = 0x105e0,
146 .fifo_start = 0x6000,
147 .fifo_size = 0x1000,
148 .ptr1_reg = DMA5_PTR1,
149 .ptr2_reg = DMA5_PTR2,
150 .cnt1_reg = DMA5_CNT1,
151 .cnt2_reg = DMA5_CNT2,
152 },
153 [SRAM_CH07] = {
154 .name = "TV Audio",
155 .cmds_start = 0x10190,
156 .ctrl_start = 0x10480,
157 .cdt = 0x10a00,
158 .fifo_start = 0x7000,
159 .fifo_size = 0x1000,
160 .ptr1_reg = DMA6_PTR1,
161 .ptr2_reg = DMA6_PTR2,
162 .cnt1_reg = DMA6_CNT1,
163 .cnt2_reg = DMA6_CNT2,
164 },
165 [SRAM_CH08] = {
166 .name = "ch8",
167 .cmds_start = 0x0,
168 .ctrl_start = 0x0,
169 .cdt = 0x0,
170 .fifo_start = 0x0,
171 .fifo_size = 0x0,
172 .ptr1_reg = DMA7_PTR1,
173 .ptr2_reg = DMA7_PTR2,
174 .cnt1_reg = DMA7_CNT1,
175 .cnt2_reg = DMA7_CNT2,
176 },
177 [SRAM_CH09] = {
178 .name = "ch9",
179 .cmds_start = 0x0,
180 .ctrl_start = 0x0,
181 .cdt = 0x0,
182 .fifo_start = 0x0,
183 .fifo_size = 0x0,
184 .ptr1_reg = DMA8_PTR1,
185 .ptr2_reg = DMA8_PTR2,
186 .cnt1_reg = DMA8_CNT1,
187 .cnt2_reg = DMA8_CNT2,
188 },
189};
190
191static struct sram_channel cx23887_sram_channels[] = {
192 [SRAM_CH01] = {
193 .name = "VID A",
194 .cmds_start = 0x10000,
195 .ctrl_start = 0x105b0,
196 .cdt = 0x107b0,
197 .fifo_start = 0x40,
198 .fifo_size = 0x2800,
199 .ptr1_reg = DMA1_PTR1,
200 .ptr2_reg = DMA1_PTR2,
201 .cnt1_reg = DMA1_CNT1,
202 .cnt2_reg = DMA1_CNT2,
203 },
204 [SRAM_CH02] = {
205 .name = "VID A (VBI)",
206 .cmds_start = 0x10050,
207 .ctrl_start = 0x105F0,
208 .cdt = 0x10810,
209 .fifo_start = 0x3000,
210 .fifo_size = 0x1000,
211 .ptr1_reg = DMA2_PTR1,
212 .ptr2_reg = DMA2_PTR2,
213 .cnt1_reg = DMA2_CNT1,
214 .cnt2_reg = DMA2_CNT2,
215 },
216 [SRAM_CH03] = {
217 .name = "TS1 B",
218 .cmds_start = 0x100A0,
219 .ctrl_start = 0x10630,
220 .cdt = 0x10870,
221 .fifo_start = 0x5000,
222 .fifo_size = 0x1000,
223 .ptr1_reg = DMA3_PTR1,
224 .ptr2_reg = DMA3_PTR2,
225 .cnt1_reg = DMA3_CNT1,
226 .cnt2_reg = DMA3_CNT2,
227 },
228 [SRAM_CH04] = {
229 .name = "ch4",
230 .cmds_start = 0x0,
231 .ctrl_start = 0x0,
232 .cdt = 0x0,
233 .fifo_start = 0x0,
234 .fifo_size = 0x0,
235 .ptr1_reg = DMA4_PTR1,
236 .ptr2_reg = DMA4_PTR2,
237 .cnt1_reg = DMA4_CNT1,
238 .cnt2_reg = DMA4_CNT2,
239 },
240 [SRAM_CH05] = {
241 .name = "ch5",
242 .cmds_start = 0x0,
243 .ctrl_start = 0x0,
244 .cdt = 0x0,
245 .fifo_start = 0x0,
246 .fifo_size = 0x0,
247 .ptr1_reg = DMA5_PTR1,
248 .ptr2_reg = DMA5_PTR2,
249 .cnt1_reg = DMA5_CNT1,
250 .cnt2_reg = DMA5_CNT2,
251 },
252 [SRAM_CH06] = {
253 .name = "TS2 C",
254 .cmds_start = 0x10140,
255 .ctrl_start = 0x10670,
256 .cdt = 0x108d0,
257 .fifo_start = 0x6000,
258 .fifo_size = 0x1000,
259 .ptr1_reg = DMA5_PTR1,
260 .ptr2_reg = DMA5_PTR2,
261 .cnt1_reg = DMA5_CNT1,
262 .cnt2_reg = DMA5_CNT2,
263 },
264 [SRAM_CH07] = {
265 .name = "TV Audio",
266 .cmds_start = 0x10190,
267 .ctrl_start = 0x106B0,
268 .cdt = 0x10930,
269 .fifo_start = 0x7000,
270 .fifo_size = 0x1000,
271 .ptr1_reg = DMA6_PTR1,
272 .ptr2_reg = DMA6_PTR2,
273 .cnt1_reg = DMA6_CNT1,
274 .cnt2_reg = DMA6_CNT2,
275 },
276 [SRAM_CH08] = {
277 .name = "ch8",
278 .cmds_start = 0x0,
279 .ctrl_start = 0x0,
280 .cdt = 0x0,
281 .fifo_start = 0x0,
282 .fifo_size = 0x0,
283 .ptr1_reg = DMA7_PTR1,
284 .ptr2_reg = DMA7_PTR2,
285 .cnt1_reg = DMA7_CNT1,
286 .cnt2_reg = DMA7_CNT2,
287 },
288 [SRAM_CH09] = {
289 .name = "ch9",
290 .cmds_start = 0x0,
291 .ctrl_start = 0x0,
292 .cdt = 0x0,
293 .fifo_start = 0x0,
294 .fifo_size = 0x0,
295 .ptr1_reg = DMA8_PTR1,
296 .ptr2_reg = DMA8_PTR2,
297 .cnt1_reg = DMA8_CNT1,
298 .cnt2_reg = DMA8_CNT2,
299 },
300};
301
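/*
 * pci_irqmask tracks which PCI interrupt sources this driver cares about.
 * cx23885_irq_add() only records the bits; cx23885_irq_add_enable() also
 * unmasks them in PCI_INT_MSK, and cx23885_irq_enable()/_disable() gate
 * them at the hardware level without forgetting them.
 */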
302static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
303{
304 unsigned long flags;
305 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
306
307 dev->pci_irqmask |= mask;
308
309 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
310}
311
312void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
313{
314 unsigned long flags;
315 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
316
317 dev->pci_irqmask |= mask;
318 cx_set(PCI_INT_MSK, mask);
319
320 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
321}
322
323void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
324{
325 u32 v;
326 unsigned long flags;
327 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
328
329 v = mask & dev->pci_irqmask;
330 if (v)
331 cx_set(PCI_INT_MSK, v);
332
333 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
334}
335
336static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
337{
338 cx23885_irq_enable(dev, 0xffffffff);
339}
340
341void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
342{
343 unsigned long flags;
344 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
345
346 cx_clear(PCI_INT_MSK, mask);
347
348 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
349}
350
351static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
352{
353 cx23885_irq_disable(dev, 0xffffffff);
354}
355
356void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
357{
358 unsigned long flags;
359 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
360
361 dev->pci_irqmask &= ~mask;
362 cx_clear(PCI_INT_MSK, mask);
363
364 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
365}
366
367static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
368{
369 u32 v;
370 unsigned long flags;
371 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
372
373 v = cx_read(PCI_INT_MSK);
374
375 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
376 return v;
377}
378
379static int cx23885_risc_decode(u32 risc)
380{
381 static char *instr[16] = {
382 [RISC_SYNC >> 28] = "sync",
383 [RISC_WRITE >> 28] = "write",
384 [RISC_WRITEC >> 28] = "writec",
385 [RISC_READ >> 28] = "read",
386 [RISC_READC >> 28] = "readc",
387 [RISC_JUMP >> 28] = "jump",
388 [RISC_SKIP >> 28] = "skip",
389 [RISC_WRITERM >> 28] = "writerm",
390 [RISC_WRITECM >> 28] = "writecm",
391 [RISC_WRITECR >> 28] = "writecr",
392 };
393 static int incr[16] = {
394 [RISC_WRITE >> 28] = 3,
395 [RISC_JUMP >> 28] = 3,
396 [RISC_SKIP >> 28] = 1,
397 [RISC_SYNC >> 28] = 1,
398 [RISC_WRITERM >> 28] = 3,
399 [RISC_WRITECM >> 28] = 3,
400 [RISC_WRITECR >> 28] = 4,
401 };
402 static char *bits[] = {
403 "12", "13", "14", "resync",
404 "cnt0", "cnt1", "18", "19",
405 "20", "21", "22", "23",
406 "irq1", "irq2", "eol", "sol",
407 };
408 int i;
409
410 printk("0x%08x [ %s", risc,
411 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
412 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
413 if (risc & (1 << (i + 12)))
414 printk(" %s", bits[i]);
415 printk(" count=%d ]\n", risc & 0xfff);
416 return incr[risc >> 28] ? incr[risc >> 28] : 1;
417}
418
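/* Complete the oldest buffer on the active queue and return it to vb2. */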
419static void cx23885_wakeup(struct cx23885_tsport *port,
420 struct cx23885_dmaqueue *q, u32 count)
421{
422 struct cx23885_dev *dev = port->dev;
423 struct cx23885_buffer *buf;
424
425 if (list_empty(&q->active))
426 return;
427 buf = list_entry(q->active.next,
428 struct cx23885_buffer, queue);
429
430 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
431 buf->vb.v4l2_buf.sequence = q->count++;
432 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
433 count, q->count);
434 list_del(&buf->queue);
435 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
436}
437
438int cx23885_sram_channel_setup(struct cx23885_dev *dev,
439 struct sram_channel *ch,
440 unsigned int bpl, u32 risc)
441{
442 unsigned int i, lines;
443 u32 cdt;
444
445 if (ch->cmds_start == 0) {
446 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
447 ch->name);
448 cx_write(ch->ptr1_reg, 0);
449 cx_write(ch->ptr2_reg, 0);
450 cx_write(ch->cnt2_reg, 0);
451 cx_write(ch->cnt1_reg, 0);
452 return 0;
453 } else {
454 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
455 ch->name);
456 }
457
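	/* Round the line length up to an 8 byte boundary and size the CDT:
	 * at most six one-line clusters are used and at least two are
	 * required (see the BUG_ON below). */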
458 bpl = (bpl + 7) & ~7;
459 cdt = ch->cdt;
460 lines = ch->fifo_size / bpl;
461 if (lines > 6)
462 lines = 6;
463 BUG_ON(lines < 2);
464
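	/* Park a minimal RISC stub (a counter-reset JUMP) at SRAM address 8;
	 * jump-only channels point their initial program counter here via
	 * cmds_start + 0 below. */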
465 cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
466 cx_write(8 + 4, 12);
467 cx_write(8 + 8, 0);
468
469
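	/* Write the cluster descriptor table: one entry per line, each
	 * pointing at its slice of the FIFO. */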
470 for (i = 0; i < lines; i++) {
471 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
472 ch->fifo_start + bpl*i);
473 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
474 cx_write(cdt + 16*i + 4, 0);
475 cx_write(cdt + 16*i + 8, 0);
476 cx_write(cdt + 16*i + 12, 0);
477 }
478
479
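	/* Write the CMDS block: initial RISC program counter, CDT base and
	 * size, then the instruction queue base and size. */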
480 if (ch->jumponly)
481 cx_write(ch->cmds_start + 0, 8);
482 else
483 cx_write(ch->cmds_start + 0, risc);
484 cx_write(ch->cmds_start + 4, 0);
485 cx_write(ch->cmds_start + 8, cdt);
486 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
487 cx_write(ch->cmds_start + 16, ch->ctrl_start);
488 if (ch->jumponly)
489 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
490 else
491 cx_write(ch->cmds_start + 20, 64 >> 2);
492 for (i = 24; i < 80; i += 4)
493 cx_write(ch->cmds_start + i, 0);
494
495
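	/* Mirror the FIFO, CDT and IQ configuration into the channel's DMA
	 * pointer/count registers. */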
496 cx_write(ch->ptr1_reg, ch->fifo_start);
497 cx_write(ch->ptr2_reg, cdt);
498 cx_write(ch->cnt2_reg, (lines*16) >> 3);
499 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
500
501 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
502 dev->bridge,
503 ch->name,
504 bpl,
505 lines);
506
507 return 0;
508}
509
510void cx23885_sram_channel_dump(struct cx23885_dev *dev,
511 struct sram_channel *ch)
512{
513 static char *name[] = {
514 "init risc lo",
515 "init risc hi",
516 "cdt base",
517 "cdt size",
518 "iq base",
519 "iq size",
520 "risc pc lo",
521 "risc pc hi",
522 "iq wr ptr",
523 "iq rd ptr",
524 "cdt current",
525 "pci target lo",
526 "pci target hi",
527 "line / byte",
528 };
529 u32 risc;
530 unsigned int i, j, n;
531
532 printk(KERN_WARNING "%s: %s - dma channel status dump\n",
533 dev->name, ch->name);
534 for (i = 0; i < ARRAY_SIZE(name); i++)
535 printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
536 dev->name, name[i],
537 cx_read(ch->cmds_start + 4*i));
538
539 for (i = 0; i < 4; i++) {
540 risc = cx_read(ch->cmds_start + 4 * (i + 14));
541 printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
542 cx23885_risc_decode(risc);
543 }
544 for (i = 0; i < (64 >> 2); i += n) {
545 risc = cx_read(ch->ctrl_start + 4 * i);
546
547
548 printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
549 ch->ctrl_start + 4 * i, i);
550 n = cx23885_risc_decode(risc);
551 for (j = 1; j < n; j++) {
552 risc = cx_read(ch->ctrl_start + 4 * (i + j));
553 printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
554 dev->name, i+j, risc, j);
555 }
556 }
557
558 printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
559 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
560 printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
561 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
562 printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
563 dev->name, cx_read(ch->ptr1_reg));
564 printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
565 dev->name, cx_read(ch->ptr2_reg));
566 printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
567 dev->name, cx_read(ch->cnt1_reg));
568 printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
569 dev->name, cx_read(ch->cnt2_reg));
570}
571
572static void cx23885_risc_disasm(struct cx23885_tsport *port,
573 struct cx23885_riscmem *risc)
574{
575 struct cx23885_dev *dev = port->dev;
576 unsigned int i, j, n;
577
578 printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
579 dev->name, risc->cpu, (unsigned long)risc->dma);
580 for (i = 0; i < (risc->size >> 2); i += n) {
581 printk(KERN_INFO "%s: %04d: ", dev->name, i);
582 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
583 for (j = 1; j < n; j++)
584 printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
585 dev->name, i + j, risc->cpu[i + j], j);
586 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
587 break;
588 }
589}
590
591static void cx23885_shutdown(struct cx23885_dev *dev)
592{
593
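	/* Disable the RISC controller */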
594 cx_write(DEV_CNTRL2, 0);
595
596
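	/* Stop all IR activity */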
597 cx_write(IR_CNTRL_REG, 0);
598
599
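	/* Stop video capture DMA on all three video ports */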
600 cx_write(VID_A_DMA_CTL, 0);
601 cx_write(VID_B_DMA_CTL, 0);
602 cx_write(VID_C_DMA_CTL, 0);
603
604
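	/* Stop internal and external audio DMA */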
605 cx_write(AUD_INT_DMA_CTL, 0);
606 cx_write(AUD_EXT_DMA_CTL, 0);
607
608
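	/* Disable the UART */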
609 cx_write(UART_CTL, 0);
610
611
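	/* Mask every interrupt source */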
612 cx23885_irq_disable_all(dev);
613 cx_write(VID_A_INT_MSK, 0);
614 cx_write(VID_B_INT_MSK, 0);
615 cx_write(VID_C_INT_MSK, 0);
616 cx_write(AUDIO_INT_INT_MSK, 0);
617 cx_write(AUDIO_EXT_INT_MSK, 0);
618
619}
620
621static void cx23885_reset(struct cx23885_dev *dev)
622{
623 dprintk(1, "%s()\n", __func__);
624
625 cx23885_shutdown(dev);
626
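	/* Acknowledge any stale interrupt status before reprogramming */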
627 cx_write(PCI_INT_STAT, 0xffffffff);
628 cx_write(VID_A_INT_STAT, 0xffffffff);
629 cx_write(VID_B_INT_STAT, 0xffffffff);
630 cx_write(VID_C_INT_STAT, 0xffffffff);
631 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
632 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
633 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
634 cx_write(PAD_CTRL, 0x00500300);
635
636 mdelay(100);
637
638 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
639 720*4, 0);
640 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
641 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
642 188*4, 0);
643 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
644 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
645 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
646 188*4, 0);
647 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
648 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
649 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
650
651 cx23885_gpio_setup(dev);
652}
653
654
655static int cx23885_pci_quirks(struct cx23885_dev *dev)
656{
657 dprintk(1, "%s()\n", __func__);
658
	/* Errata workaround: the original cx23885 bridge needs RDR_TLCTL0
	 * bit 4 cleared before DMA is started; the cx23887/8 bridges do
	 * not. */
663 if (dev->bridge == CX23885_BRIDGE_885)
664 cx_clear(RDR_TLCTL0, 1 << 4);
665
666 return 0;
667}
668
669static int get_resources(struct cx23885_dev *dev)
670{
671 if (request_mem_region(pci_resource_start(dev->pci, 0),
672 pci_resource_len(dev->pci, 0),
673 dev->name))
674 return 0;
675
676 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
677 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
678
679 return -EBUSY;
680}
681
682static int cx23885_init_tsport(struct cx23885_dev *dev,
683 struct cx23885_tsport *port, int portno)
684{
685 dprintk(1, "%s(portno=%d)\n", __func__, portno);
686
687
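	/* Common transport bus defaults: dma_ctl_val enables the port's
	 * RISC controller and FIFO, ts_int_msk_val unmasks the per-port TS
	 * interrupt sources, and hw_sop_ctrl_val programs 188-byte packets
	 * with the 0x47 MPEG sync byte. */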
688 port->dma_ctl_val = 0x11;
689 port->ts_int_msk_val = 0x1111;
690 port->vld_misc_val = 0x0;
691 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
692
693 spin_lock_init(&port->slock);
694 port->dev = dev;
695 port->nr = portno;
696
697 INIT_LIST_HEAD(&port->mpegq.active);
698 mutex_init(&port->frontends.lock);
699 INIT_LIST_HEAD(&port->frontends.felist);
700 port->frontends.active_fe_id = 0;
701
702
703
704
705
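	/* Unless a board profile already set it, assume one frontend per
	 * transport port. */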
706 if (!port->num_frontends)
707 port->num_frontends = 1;
708
709 switch (portno) {
710 case 1:
711 port->reg_gpcnt = VID_B_GPCNT;
712 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
713 port->reg_dma_ctl = VID_B_DMA_CTL;
714 port->reg_lngth = VID_B_LNGTH;
715 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
716 port->reg_gen_ctrl = VID_B_GEN_CTL;
717 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
718 port->reg_sop_status = VID_B_SOP_STATUS;
719 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
720 port->reg_vld_misc = VID_B_VLD_MISC;
721 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
722 port->reg_src_sel = VID_B_SRC_SEL;
723 port->reg_ts_int_msk = VID_B_INT_MSK;
724 port->reg_ts_int_stat = VID_B_INT_STAT;
725 port->sram_chno = SRAM_CH03;
726 port->pci_irqmask = 0x02;
727 break;
728 case 2:
729 port->reg_gpcnt = VID_C_GPCNT;
730 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
731 port->reg_dma_ctl = VID_C_DMA_CTL;
732 port->reg_lngth = VID_C_LNGTH;
733 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
734 port->reg_gen_ctrl = VID_C_GEN_CTL;
735 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
736 port->reg_sop_status = VID_C_SOP_STATUS;
737 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
738 port->reg_vld_misc = VID_C_VLD_MISC;
739 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
740 port->reg_src_sel = 0;
741 port->reg_ts_int_msk = VID_C_INT_MSK;
742 port->reg_ts_int_stat = VID_C_INT_STAT;
743 port->sram_chno = SRAM_CH06;
744 port->pci_irqmask = 0x04;
745 break;
746 default:
747 BUG();
748 }
749
750 return 0;
751}
752
static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	u32 rev = cx_read(RDR_CFG2) & 0xff;

	switch (rev) {
756 case 0x00:
757
758 dev->hwrevision = 0xa0;
759 break;
760 case 0x01:
761
762 dev->hwrevision = 0xa1;
763 break;
764 case 0x02:
765
766 dev->hwrevision = 0xb0;
767 break;
768 case 0x03:
769 if (dev->pci->device == 0x8880) {
770
771 dev->hwrevision = 0xc0;
772 } else {
773
774 dev->hwrevision = 0xa4;
775 }
776 break;
777 case 0x04:
778 if (dev->pci->device == 0x8880) {
779
780 dev->hwrevision = 0xd0;
781 } else {
782
783 dev->hwrevision = 0xa5;
784 }
785 break;
786 case 0x0e:
787
788 dev->hwrevision = 0xc0;
789 break;
790 case 0x0f:
791
792 dev->hwrevision = 0xb1;
793 break;
	default:
		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
			__func__, rev);
797 }
798 if (dev->hwrevision)
799 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
800 __func__, dev->hwrevision);
801 else
802 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
803 __func__, dev->hwrevision);
804}
805
806
807struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
808{
809 struct v4l2_subdev *result = NULL;
810 struct v4l2_subdev *sd;
811
812 spin_lock(&dev->v4l2_dev.lock);
813 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
814 if (sd->grp_id == hw) {
815 result = sd;
816 break;
817 }
818 }
819 spin_unlock(&dev->v4l2_dev.lock);
820 return result;
821}
822
823static int cx23885_dev_setup(struct cx23885_dev *dev)
824{
825 int i;
826
827 spin_lock_init(&dev->pci_irqmask_lock);
828
829 mutex_init(&dev->lock);
830 mutex_init(&dev->gpio_lock);
831
832 atomic_inc(&dev->refcount);
833
834 dev->nr = cx23885_devcount++;
835 sprintf(dev->name, "cx23885[%d]", dev->nr);
836
837
838 if (dev->pci->device == 0x8880) {
839
840 dev->bridge = CX23885_BRIDGE_887;
841
842 dev->clk_freq = 25000000;
843 dev->sram_channels = cx23887_sram_channels;
844 } else
845 if (dev->pci->device == 0x8852) {
846 dev->bridge = CX23885_BRIDGE_885;
847
848 dev->clk_freq = 28000000;
849 dev->sram_channels = cx23885_sram_channels;
850 } else
851 BUG();
852
853 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
854 __func__, dev->bridge);
855
856
857 dev->board = UNSET;
858 if (card[dev->nr] < cx23885_bcount)
859 dev->board = card[dev->nr];
860 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
861 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
862 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
863 dev->board = cx23885_subids[i].card;
864 if (UNSET == dev->board) {
865 dev->board = CX23885_BOARD_UNKNOWN;
866 cx23885_card_list(dev);
867 }
868
869
870 if (cx23885_boards[dev->board].clk_freq > 0)
871 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
872
873 dev->pci_bus = dev->pci->bus->number;
874 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
875 cx23885_irq_add(dev, 0x001f00);
876
877
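	/* External Master 1 Bus */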
878 dev->i2c_bus[0].nr = 0;
879 dev->i2c_bus[0].dev = dev;
880 dev->i2c_bus[0].reg_stat = I2C1_STAT;
881 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
882 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
883 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
884 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
885 dev->i2c_bus[0].i2c_period = (0x9d << 24);
886
887
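	/* External Master 2 Bus */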
888 dev->i2c_bus[1].nr = 1;
889 dev->i2c_bus[1].dev = dev;
890 dev->i2c_bus[1].reg_stat = I2C2_STAT;
891 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
892 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
893 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
894 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
895 dev->i2c_bus[1].i2c_period = (0x9d << 24);
896
897
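	/* Internal Master 3 Bus (A/V core) */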
898 dev->i2c_bus[2].nr = 2;
899 dev->i2c_bus[2].dev = dev;
900 dev->i2c_bus[2].reg_stat = I2C3_STAT;
901 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
902 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
903 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
904 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
905 dev->i2c_bus[2].i2c_period = (0x07 << 24);
906
907 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
908 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
909 cx23885_init_tsport(dev, &dev->ts1, 1);
910
911 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
912 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
913 cx23885_init_tsport(dev, &dev->ts2, 2);
914
915 if (get_resources(dev) < 0) {
916 printk(KERN_ERR "CORE %s No more PCIe resources for "
917 "subsystem: %04x:%04x\n",
918 dev->name, dev->pci->subsystem_vendor,
919 dev->pci->subsystem_device);
920
921 cx23885_devcount--;
922 return -ENODEV;
923 }
924
925
926 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
927 pci_resource_len(dev->pci, 0));
928
929 dev->bmmio = (u8 __iomem *)dev->lmmio;
930
931 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
932 dev->name, dev->pci->subsystem_vendor,
933 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
934 dev->board, card[dev->nr] == dev->board ?
935 "insmod option" : "autodetected");
936
937 cx23885_pci_quirks(dev);
938
939
940 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
941 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
942 dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
943 dev->radio_type = cx23885_boards[dev->board].radio_type;
944 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
945
946 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
947 __func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
948 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
949 __func__, dev->radio_type, dev->radio_addr);
950
951
952
953
954
955 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
956 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
957 cx23885_mc417_init(dev);
958
959
960 cx23885_reset(dev);
961
962 cx23885_i2c_register(&dev->i2c_bus[0]);
963 cx23885_i2c_register(&dev->i2c_bus[1]);
964 cx23885_i2c_register(&dev->i2c_bus[2]);
965 cx23885_card_setup(dev);
966 call_all(dev, core, s_power, 0);
967 cx23885_ir_init(dev);
968
969 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
970 if (cx23885_video_register(dev) < 0) {
971 printk(KERN_ERR "%s() Failed to register analog "
972 "video adapters on VID_A\n", __func__);
973 }
974 }
975
976 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
977 if (cx23885_boards[dev->board].num_fds_portb)
978 dev->ts1.num_frontends =
979 cx23885_boards[dev->board].num_fds_portb;
980 if (cx23885_dvb_register(&dev->ts1) < 0) {
981 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
982 __func__);
983 }
984 } else
985 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
986 if (cx23885_417_register(dev) < 0) {
987 printk(KERN_ERR
988 "%s() Failed to register 417 on VID_B\n",
989 __func__);
990 }
991 }
992
993 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
994 if (cx23885_boards[dev->board].num_fds_portc)
995 dev->ts2.num_frontends =
996 cx23885_boards[dev->board].num_fds_portc;
997 if (cx23885_dvb_register(&dev->ts2) < 0) {
998 printk(KERN_ERR
999 "%s() Failed to register dvb on VID_C\n",
1000 __func__);
1001 }
1002 } else
1003 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1004 if (cx23885_417_register(dev) < 0) {
1005 printk(KERN_ERR
1006 "%s() Failed to register 417 on VID_C\n",
1007 __func__);
1008 }
1009 }
1010
1011 cx23885_dev_checkrevision(dev);
1012
1013
1014 if (cx23885_boards[dev->board].ci_type > 0)
1015 cx_clear(RDR_RDRCTL1, 1 << 8);
1016
1017 switch (dev->board) {
1018 case CX23885_BOARD_TEVII_S470:
1019 case CX23885_BOARD_TEVII_S471:
1020 cx_clear(RDR_RDRCTL1, 1 << 8);
1021 break;
1022 }
1023
1024 return 0;
1025}
1026
1027static void cx23885_dev_unregister(struct cx23885_dev *dev)
1028{
1029 release_mem_region(pci_resource_start(dev->pci, 0),
1030 pci_resource_len(dev->pci, 0));
1031
1032 if (!atomic_dec_and_test(&dev->refcount))
1033 return;
1034
1035 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1036 cx23885_video_unregister(dev);
1037
1038 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1039 cx23885_dvb_unregister(&dev->ts1);
1040
1041 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1042 cx23885_417_unregister(dev);
1043
1044 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1045 cx23885_dvb_unregister(&dev->ts2);
1046
1047 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1048 cx23885_417_unregister(dev);
1049
1050 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1051 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1052 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1053
1054 iounmap(dev->lmmio);
1055}
1056
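/*
 * Build the RISC program for one field: an optional leading JUMP (patched
 * later when buffers are chained), an optional RESYNC on sync_line, then one
 * WRITE per line, split whenever a line crosses a scatter-gather boundary.
 */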
1057static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1058 unsigned int offset, u32 sync_line,
1059 unsigned int bpl, unsigned int padding,
1060 unsigned int lines, unsigned int lpi, bool jump)
1061{
1062 struct scatterlist *sg;
1063 unsigned int line, todo, sol;
1064
1065
1066 if (jump) {
1067 *(rp++) = cpu_to_le32(RISC_JUMP);
1068 *(rp++) = cpu_to_le32(0);
1069 *(rp++) = cpu_to_le32(0);
1070 }
1071
1072
1073 if (sync_line != NO_SYNC_LINE)
1074 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1075
1076
1077 sg = sglist;
1078 for (line = 0; line < lines; line++) {
1079 while (offset && offset >= sg_dma_len(sg)) {
1080 offset -= sg_dma_len(sg);
1081 sg = sg_next(sg);
1082 }
1083
1084 if (lpi && line > 0 && !(line % lpi))
1085 sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
1086 else
1087 sol = RISC_SOL;
1088
1089 if (bpl <= sg_dma_len(sg)-offset) {
1090
1091 *(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
1092 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1093 *(rp++) = cpu_to_le32(0);
1094 offset += bpl;
1095 } else {
1096
1097 todo = bpl;
1098 *(rp++) = cpu_to_le32(RISC_WRITE|sol|
1099 (sg_dma_len(sg)-offset));
1100 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1101 *(rp++) = cpu_to_le32(0);
1102 todo -= (sg_dma_len(sg)-offset);
1103 offset = 0;
1104 sg = sg_next(sg);
1105 while (todo > sg_dma_len(sg)) {
1106 *(rp++) = cpu_to_le32(RISC_WRITE|
1107 sg_dma_len(sg));
1108 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1109 *(rp++) = cpu_to_le32(0);
1110 todo -= sg_dma_len(sg);
1111 sg = sg_next(sg);
1112 }
1113 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1114 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1115 *(rp++) = cpu_to_le32(0);
1116 offset += todo;
1117 }
1118 offset += padding;
1119 }
1120
1121 return rp;
1122}
1123
1124int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1125 struct scatterlist *sglist, unsigned int top_offset,
1126 unsigned int bottom_offset, unsigned int bpl,
1127 unsigned int padding, unsigned int lines)
1128{
1129 u32 instructions, fields;
1130 __le32 *rp;
1131
1132 fields = 0;
1133 if (UNSET != top_offset)
1134 fields++;
1135 if (UNSET != bottom_offset)
1136 fields++;
1137
	/* Estimate RISC memory: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Padding
	   can cause the next bpl to start close to a page border. The first
	   DMA region may be smaller than PAGE_SIZE. */
	/* write and jump need an extra dword */
1143 instructions = fields * (1 + ((bpl + padding) * lines)
1144 / PAGE_SIZE + lines);
1145 instructions += 5;
1146 risc->size = instructions * 12;
1147 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1148 if (risc->cpu == NULL)
1149 return -ENOMEM;
1150
1151
1152 rp = risc->cpu;
1153 if (UNSET != top_offset)
1154 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1155 bpl, padding, lines, 0, true);
1156 if (UNSET != bottom_offset)
1157 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1158 bpl, padding, lines, 0, UNSET == top_offset);
1159
1160
1161 risc->jmp = rp;
1162 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1163 return 0;
1164}
1165
1166int cx23885_risc_databuffer(struct pci_dev *pci,
1167 struct cx23885_riscmem *risc,
1168 struct scatterlist *sglist,
1169 unsigned int bpl,
1170 unsigned int lines, unsigned int lpi)
1171{
1172 u32 instructions;
1173 __le32 *rp;
1174
	/* Estimate RISC memory: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Here there
	   is no padding and no sync. The first DMA region may be smaller
	   than PAGE_SIZE. */
	/* Jump and write need an extra dword */
1180 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
1181 instructions += 4;
1182
1183 risc->size = instructions * 12;
1184 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1185 if (risc->cpu == NULL)
1186 return -ENOMEM;
1187
1188
1189 rp = risc->cpu;
1190 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
1191 bpl, 0, lines, lpi, lpi == 0);
1192
1193
1194 risc->jmp = rp;
1195 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1196 return 0;
1197}
1198
1199int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1200 struct scatterlist *sglist, unsigned int top_offset,
1201 unsigned int bottom_offset, unsigned int bpl,
1202 unsigned int padding, unsigned int lines)
1203{
1204 u32 instructions, fields;
1205 __le32 *rp;
1206
1207 fields = 0;
1208 if (UNSET != top_offset)
1209 fields++;
1210 if (UNSET != bottom_offset)
1211 fields++;
1212
1213
1214
1215
1216
1217
1218 instructions = fields * (1 + ((bpl + padding) * lines)
1219 / PAGE_SIZE + lines);
1220 instructions += 5;
1221 risc->size = instructions * 12;
1222 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1223 if (risc->cpu == NULL)
1224 return -ENOMEM;
1225
1226 rp = risc->cpu;
1227
1228
1229
1230 if (UNSET != top_offset)
1231 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1232 bpl, padding, lines, 0, true);
1233
1234 if (UNSET != bottom_offset)
1235 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1236 bpl, padding, lines, 0, UNSET == top_offset);
1237
1238
1239
1240
1241 risc->jmp = rp;
1242 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1243 return 0;
1244}
1245
1246
1247void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
1248{
1249 struct cx23885_riscmem *risc = &buf->risc;
1250
1251 BUG_ON(in_interrupt());
1252 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
1253}
1254
1255static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1256{
1257 struct cx23885_dev *dev = port->dev;
1258
1259 dprintk(1, "%s() Register Dump\n", __func__);
1260 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
1261 cx_read(DEV_CNTRL2));
1262 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
1263 cx23885_irq_get_mask(dev));
1264 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
1265 cx_read(AUDIO_INT_INT_MSK));
1266 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
1267 cx_read(AUD_INT_DMA_CTL));
1268 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
1269 cx_read(AUDIO_EXT_INT_MSK));
1270 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
1271 cx_read(AUD_EXT_DMA_CTL));
1272 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
1273 cx_read(PAD_CTRL));
1274 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
1275 cx_read(ALT_PIN_OUT_SEL));
1276 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
1277 cx_read(GPIO2));
1278 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
1279 port->reg_gpcnt, cx_read(port->reg_gpcnt));
1280 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
1281 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
1282 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
1283 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
1284 if (port->reg_src_sel)
1285 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1286 port->reg_src_sel, cx_read(port->reg_src_sel));
1287 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
1288 port->reg_lngth, cx_read(port->reg_lngth));
1289 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
1290 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
1291 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
1292 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
1293 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
1294 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
1295 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
1296 port->reg_sop_status, cx_read(port->reg_sop_status));
1297 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
1298 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
1299 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
1300 port->reg_vld_misc, cx_read(port->reg_vld_misc));
1301 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
1302 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
1303 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
1304 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
1305}
1306
1307int cx23885_start_dma(struct cx23885_tsport *port,
1308 struct cx23885_dmaqueue *q,
1309 struct cx23885_buffer *buf)
1310{
1311 struct cx23885_dev *dev = port->dev;
1312 u32 reg;
1313
1314 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1315 dev->width, dev->height, dev->field);
1316
1317
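	/* Stop the FIFO and RISC engine for this port */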
1318 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1319
1320
1321 cx23885_sram_channel_setup(dev,
1322 &dev->sram_channels[port->sram_chno],
1323 port->ts_packet_size, buf->risc.dma);
1324 if (debug > 5) {
1325 cx23885_sram_channel_dump(dev,
1326 &dev->sram_channels[port->sram_chno]);
1327 cx23885_risc_disasm(port, &buf->risc);
1328 }
1329
1330
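	/* Write the TS packet length to the chip */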
1331 cx_write(port->reg_lngth, port->ts_packet_size);
1332
1333 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1334 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		printk(KERN_ERR "%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1336 __func__,
1337 cx23885_boards[dev->board].portb,
1338 cx23885_boards[dev->board].portc);
1339 return -EINVAL;
1340 }
1341
1342 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1343 cx23885_av_clk(dev, 0);
1344
1345 udelay(100);
1346
1347
1348 if (port->reg_src_sel)
1349 cx_write(port->reg_src_sel, port->src_sel_val);
1350
1351 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1352 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1353 cx_write(port->reg_vld_misc, port->vld_misc_val);
1354 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1355 udelay(100);
1356
1357
1358
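	/* Re-arm the general purpose packet counter and reset our count */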
1359 cx_write(port->reg_gpcnt_ctl, 3);
1360 q->count = 0;
1361
1362
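	/* Set VID_B pins to input */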
1363 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1364 reg = cx_read(PAD_CTRL);
1365 reg &= ~0x3;
1366 cx_write(PAD_CTRL, reg);
1367 }
1368
1369
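	/* Set VID_C pins to input */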
1370 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1371 reg = cx_read(PAD_CTRL);
1372 reg &= ~0x4;
1373 cx_write(PAD_CTRL, reg);
1374 }
1375
1376 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1377
1378 reg = cx_read(PAD_CTRL);
1379 reg = reg & ~0x1;
1380
1381
1382
1383 reg = reg | 0xa;
1384 cx_write(PAD_CTRL, reg);
1385
1386
1387 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1388 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1389 }
1390
1391 switch (dev->bridge) {
1392 case CX23885_BRIDGE_885:
1393 case CX23885_BRIDGE_887:
1394 case CX23885_BRIDGE_888:
1395
1396 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1397 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1398 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1399 cx23885_irq_add(dev, port->pci_irqmask);
1400 cx23885_irq_enable_all(dev);
1401 break;
1402 default:
1403 BUG();
1404 }
1405
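	/* Enable the RISC controller */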
1406 cx_set(DEV_CNTRL2, (1<<5));
1407
1408 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1409 cx23885_av_clk(dev, 1);
1410
1411 if (debug > 4)
1412 cx23885_tsport_reg_dump(port);
1413
1414 return 0;
1415}
1416
1417static int cx23885_stop_dma(struct cx23885_tsport *port)
1418{
1419 struct cx23885_dev *dev = port->dev;
1420 u32 reg;
1421
1422 dprintk(1, "%s()\n", __func__);
1423
1424
1425 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1426 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1427
1428 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1429
1430 reg = cx_read(PAD_CTRL);
1431
1432
1433 reg = reg | 0x1;
1434
1435
1436 reg = reg & ~0xa;
1437 cx_write(PAD_CTRL, reg);
1438 cx_write(port->reg_src_sel, 0);
1439 cx_write(port->reg_gen_ctrl, 8);
1440
1441 }
1442
1443 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1444 cx23885_av_clk(dev, 0);
1445
1446 return 0;
1447}
1448
1449
1450
1451int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1452{
1453 struct cx23885_dev *dev = port->dev;
1454 int size = port->ts_packet_size * port->ts_packet_count;
1455 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
1456
1457 dprintk(1, "%s: %p\n", __func__, buf);
1458 if (vb2_plane_size(&buf->vb, 0) < size)
1459 return -EINVAL;
1460 vb2_set_plane_payload(&buf->vb, 0, size);
1461
1462 cx23885_risc_databuffer(dev->pci, &buf->risc,
1463 sgt->sgl,
1464 port->ts_packet_size, port->ts_packet_count, 0);
1465 return 0;
1466}
1467
/*
 * The RISC program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12
 * (skipping the initial JUMP).
 *
 * This is the RISC program of the first buffer to be queued if the active
 * list is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
1489void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1490{
1491 struct cx23885_buffer *prev;
1492 struct cx23885_dev *dev = port->dev;
1493 struct cx23885_dmaqueue *cx88q = &port->mpegq;
1494 unsigned long flags;
1495
1496 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
1497 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
1498 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
1499 buf->risc.jmp[2] = cpu_to_le32(0);
1500
1501 spin_lock_irqsave(&dev->slock, flags);
1502 if (list_empty(&cx88q->active)) {
1503 list_add_tail(&buf->queue, &cx88q->active);
1504 dprintk(1, "[%p/%d] %s - first active\n",
1505 buf, buf->vb.v4l2_buf.index, __func__);
1506 } else {
1507 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
1508 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
1509 queue);
1510 list_add_tail(&buf->queue, &cx88q->active);
1511 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1512 dprintk(1, "[%p/%d] %s - append to active\n",
1513 buf, buf->vb.v4l2_buf.index, __func__);
1514 }
1515 spin_unlock_irqrestore(&dev->slock, flags);
1516}
1517
1518
1519
1520static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1521{
1522 struct cx23885_dev *dev = port->dev;
1523 struct cx23885_dmaqueue *q = &port->mpegq;
1524 struct cx23885_buffer *buf;
1525 unsigned long flags;
1526
1527 spin_lock_irqsave(&port->slock, flags);
1528 while (!list_empty(&q->active)) {
1529 buf = list_entry(q->active.next, struct cx23885_buffer,
1530 queue);
1531 list_del(&buf->queue);
1532 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
1533 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1534 buf, buf->vb.v4l2_buf.index, reason, (unsigned long)buf->risc.dma);
1535 }
1536 spin_unlock_irqrestore(&port->slock, flags);
1537}
1538
1539void cx23885_cancel_buffers(struct cx23885_tsport *port)
1540{
1541 struct cx23885_dev *dev = port->dev;
1542
1543 dprintk(1, "%s()\n", __func__);
1544 cx23885_stop_dma(port);
1545 do_cancel_buffers(port, "cancel");
1546}
1547
1548int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1549{
1550
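	/* FIXME: assumes the encoder is on port B (ts1), even when called
	 * for port C status */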
1551 struct cx23885_tsport *port = &dev->ts1;
1552 int count = 0;
1553 int handled = 0;
1554
1555 if (status == 0)
1556 return handled;
1557
1558 count = cx_read(port->reg_gpcnt);
1559 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1560 status, cx_read(port->reg_ts_int_msk), count);
1561
1562 if ((status & VID_B_MSK_BAD_PKT) ||
1563 (status & VID_B_MSK_OPC_ERR) ||
1564 (status & VID_B_MSK_VBI_OPC_ERR) ||
1565 (status & VID_B_MSK_SYNC) ||
1566 (status & VID_B_MSK_VBI_SYNC) ||
1567 (status & VID_B_MSK_OF) ||
1568 (status & VID_B_MSK_VBI_OF)) {
1569 printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
1570 "= 0x%x\n", dev->name, status);
1571 if (status & VID_B_MSK_BAD_PKT)
1572 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1573 if (status & VID_B_MSK_OPC_ERR)
1574 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1575 if (status & VID_B_MSK_VBI_OPC_ERR)
1576 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1577 if (status & VID_B_MSK_SYNC)
1578 dprintk(1, " VID_B_MSK_SYNC\n");
1579 if (status & VID_B_MSK_VBI_SYNC)
1580 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1581 if (status & VID_B_MSK_OF)
1582 dprintk(1, " VID_B_MSK_OF\n");
1583 if (status & VID_B_MSK_VBI_OF)
1584 dprintk(1, " VID_B_MSK_VBI_OF\n");
1585
1586 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1587 cx23885_sram_channel_dump(dev,
1588 &dev->sram_channels[port->sram_chno]);
1589 cx23885_417_check_encoder(dev);
1590 } else if (status & VID_B_MSK_RISCI1) {
1591 dprintk(7, " VID_B_MSK_RISCI1\n");
1592 spin_lock(&port->slock);
1593 cx23885_wakeup(port, &port->mpegq, count);
1594 spin_unlock(&port->slock);
1595 }
1596 if (status) {
1597 cx_write(port->reg_ts_int_stat, status);
1598 handled = 1;
1599 }
1600
1601 return handled;
1602}
1603
1604static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1605{
1606 struct cx23885_dev *dev = port->dev;
1607 int handled = 0;
1608 u32 count;
1609
1610 if ((status & VID_BC_MSK_OPC_ERR) ||
1611 (status & VID_BC_MSK_BAD_PKT) ||
1612 (status & VID_BC_MSK_SYNC) ||
1613 (status & VID_BC_MSK_OF)) {
1614
1615 if (status & VID_BC_MSK_OPC_ERR)
1616 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1617 VID_BC_MSK_OPC_ERR);
1618
1619 if (status & VID_BC_MSK_BAD_PKT)
1620 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1621 VID_BC_MSK_BAD_PKT);
1622
1623 if (status & VID_BC_MSK_SYNC)
1624 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1625 VID_BC_MSK_SYNC);
1626
1627 if (status & VID_BC_MSK_OF)
1628 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1629 VID_BC_MSK_OF);
1630
1631 printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
1632
1633 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1634 cx23885_sram_channel_dump(dev,
1635 &dev->sram_channels[port->sram_chno]);
1636
1637 } else if (status & VID_BC_MSK_RISCI1) {
1638
1639 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1640
1641 spin_lock(&port->slock);
1642 count = cx_read(port->reg_gpcnt);
1643 cx23885_wakeup(port, &port->mpegq, count);
1644 spin_unlock(&port->slock);
1645
1646 }
1647 if (status) {
1648 cx_write(port->reg_ts_int_stat, status);
1649 handled = 1;
1650 }
1651
1652 return handled;
1653}
1654
1655static irqreturn_t cx23885_irq(int irq, void *dev_id)
1656{
1657 struct cx23885_dev *dev = dev_id;
1658 struct cx23885_tsport *ts1 = &dev->ts1;
1659 struct cx23885_tsport *ts2 = &dev->ts2;
1660 u32 pci_status, pci_mask;
1661 u32 vida_status, vida_mask;
1662 u32 audint_status, audint_mask;
1663 u32 ts1_status, ts1_mask;
1664 u32 ts2_status, ts2_mask;
1665 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1666 int audint_count = 0;
1667 bool subdev_handled;
1668
1669 pci_status = cx_read(PCI_INT_STAT);
1670 pci_mask = cx23885_irq_get_mask(dev);
1671 vida_status = cx_read(VID_A_INT_STAT);
1672 vida_mask = cx_read(VID_A_INT_MSK);
1673 audint_status = cx_read(AUDIO_INT_INT_STAT);
1674 audint_mask = cx_read(AUDIO_INT_INT_MSK);
1675 ts1_status = cx_read(VID_B_INT_STAT);
1676 ts1_mask = cx_read(VID_B_INT_MSK);
1677 ts2_status = cx_read(VID_C_INT_STAT);
1678 ts2_mask = cx_read(VID_C_INT_MSK);
1679
1680 if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
1681 goto out;
1682
1683 vida_count = cx_read(VID_A_GPCNT);
1684 audint_count = cx_read(AUD_INT_A_GPCNT);
1685 ts1_count = cx_read(ts1->reg_gpcnt);
1686 ts2_count = cx_read(ts2->reg_gpcnt);
1687 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1688 pci_status, pci_mask);
1689 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1690 vida_status, vida_mask, vida_count);
1691 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1692 audint_status, audint_mask, audint_count);
1693 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1694 ts1_status, ts1_mask, ts1_count);
1695 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1696 ts2_status, ts2_mask, ts2_count);
1697
1698 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1699 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1700 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1701 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1702 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
1703 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
1704
1705 if (pci_status & PCI_MSK_RISC_RD)
1706 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1707 PCI_MSK_RISC_RD);
1708
1709 if (pci_status & PCI_MSK_RISC_WR)
1710 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1711 PCI_MSK_RISC_WR);
1712
1713 if (pci_status & PCI_MSK_AL_RD)
1714 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1715 PCI_MSK_AL_RD);
1716
1717 if (pci_status & PCI_MSK_AL_WR)
1718 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1719 PCI_MSK_AL_WR);
1720
1721 if (pci_status & PCI_MSK_APB_DMA)
1722 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1723 PCI_MSK_APB_DMA);
1724
1725 if (pci_status & PCI_MSK_VID_C)
1726 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1727 PCI_MSK_VID_C);
1728
1729 if (pci_status & PCI_MSK_VID_B)
1730 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1731 PCI_MSK_VID_B);
1732
1733 if (pci_status & PCI_MSK_VID_A)
1734 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1735 PCI_MSK_VID_A);
1736
1737 if (pci_status & PCI_MSK_AUD_INT)
1738 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1739 PCI_MSK_AUD_INT);
1740
1741 if (pci_status & PCI_MSK_AUD_EXT)
1742 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1743 PCI_MSK_AUD_EXT);
1744
1745 if (pci_status & PCI_MSK_GPIO0)
1746 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1747 PCI_MSK_GPIO0);
1748
1749 if (pci_status & PCI_MSK_GPIO1)
1750 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1751 PCI_MSK_GPIO1);
1752
1753 if (pci_status & PCI_MSK_AV_CORE)
1754 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1755 PCI_MSK_AV_CORE);
1756
1757 if (pci_status & PCI_MSK_IR)
1758 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1759 PCI_MSK_IR);
1760 }
1761
1762 if (cx23885_boards[dev->board].ci_type == 1 &&
1763 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1764 handled += netup_ci_slot_status(dev, pci_status);
1765
1766 if (cx23885_boards[dev->board].ci_type == 2 &&
1767 (pci_status & PCI_MSK_GPIO0))
1768 handled += altera_ci_irq(dev);
1769
1770 if (ts1_status) {
1771 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1772 handled += cx23885_irq_ts(ts1, ts1_status);
1773 else
1774 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1775 handled += cx23885_irq_417(dev, ts1_status);
1776 }
1777
1778 if (ts2_status) {
1779 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1780 handled += cx23885_irq_ts(ts2, ts2_status);
1781 else
1782 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1783 handled += cx23885_irq_417(dev, ts2_status);
1784 }
1785
1786 if (vida_status)
1787 handled += cx23885_video_irq(dev, vida_status);
1788
1789 if (audint_status)
1790 handled += cx23885_audio_irq(dev, audint_status, audint_mask);
1791
1792 if (pci_status & PCI_MSK_IR) {
1793 subdev_handled = false;
1794 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
1795 pci_status, &subdev_handled);
1796 if (subdev_handled)
1797 handled++;
1798 }
1799
1800 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1801 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
1802 schedule_work(&dev->cx25840_work);
1803 handled++;
1804 }
1805
1806 if (handled)
1807 cx_write(PCI_INT_STAT, pci_status);
1808out:
1809 return IRQ_RETVAL(handled);
1810}
1811
1812static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1813 unsigned int notification, void *arg)
1814{
1815 struct cx23885_dev *dev;
1816
1817 if (sd == NULL)
1818 return;
1819
1820 dev = to_cx23885(sd->v4l2_dev);
1821
1822 switch (notification) {
1823 case V4L2_SUBDEV_IR_RX_NOTIFY:
1824 if (sd == dev->sd_ir)
1825 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1826 break;
1827 case V4L2_SUBDEV_IR_TX_NOTIFY:
1828 if (sd == dev->sd_ir)
1829 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1830 break;
1831 }
1832}
1833
1834static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1835{
1836 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1837 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1838 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1839 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1840}
1841
1842static inline int encoder_on_portb(struct cx23885_dev *dev)
1843{
1844 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1845}
1846
1847static inline int encoder_on_portc(struct cx23885_dev *dev)
1848{
1849 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1850}
1851
/*
 * GPIO helpers. The pin mask is split across two register banks:
 *   bits 0-2   (0x00000007) are driven through GP0_IO;
 *   bits 3-18  (0x0007fff8) are driven through the MC417 bus
 *              (MC417_RWD / MC417_OEN), shifted down by three - these
 *              pins are shared with the MPEG encoder ports, hence the
 *              warnings below when an encoder is active;
 *   bits 19-23 (0x00f80000) are not supported by this driver.
 */
1864void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1865{
1866 if (mask & 0x7)
1867 cx_set(GP0_IO, mask & 0x7);
1868
1869 if (mask & 0x0007fff8) {
1870 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1871 printk(KERN_ERR
1872 "%s: Setting GPIO on encoder ports\n",
1873 dev->name);
1874 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1875 }
1876
1877
1878 if (mask & 0x00f80000)
1879 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1880}
1881
1882void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1883{
1884 if (mask & 0x00000007)
1885 cx_clear(GP0_IO, mask & 0x7);
1886
1887 if (mask & 0x0007fff8) {
1888 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1889 printk(KERN_ERR
1890 "%s: Clearing GPIO moving on encoder ports\n",
1891 dev->name);
1892 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1893 }
1894
1895
1896 if (mask & 0x00f80000)
1897 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1898}
1899
1900u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1901{
1902 if (mask & 0x00000007)
1903 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1904
1905 if (mask & 0x0007fff8) {
1906 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1907 printk(KERN_ERR
1908 "%s: Reading GPIO moving on encoder ports\n",
1909 dev->name);
1910 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
1911 }
1912
1913
1914 if (mask & 0x00f80000)
1915 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1916
1917 return 0;
1918}
1919
1920void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1921{
1922 if ((mask & 0x00000007) && asoutput)
1923 cx_set(GP0_IO, (mask & 0x7) << 16);
1924 else if ((mask & 0x00000007) && !asoutput)
1925 cx_clear(GP0_IO, (mask & 0x7) << 16);
1926
1927 if (mask & 0x0007fff8) {
1928 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1929 printk(KERN_ERR
1930 "%s: Enabling GPIO on encoder ports\n",
1931 dev->name);
1932 }
1933
1934
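	/* MC417_OEN is active low for output */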
1935 if ((mask & 0x0007fff8) && asoutput)
1936 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1937
1938 else if ((mask & 0x0007fff8) && !asoutput)
1939 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1940
1941
1942}
1943
1944static int cx23885_initdev(struct pci_dev *pci_dev,
1945 const struct pci_device_id *pci_id)
1946{
1947 struct cx23885_dev *dev;
1948 struct v4l2_ctrl_handler *hdl;
1949 int err;
1950
1951 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1952 if (NULL == dev)
1953 return -ENOMEM;
1954
1955 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1956 if (err < 0)
1957 goto fail_free;
1958
1959 hdl = &dev->ctrl_handler;
1960 v4l2_ctrl_handler_init(hdl, 6);
1961 if (hdl->error) {
1962 err = hdl->error;
1963 goto fail_ctrl;
1964 }
1965 dev->v4l2_dev.ctrl_handler = hdl;
1966
1967
1968 cx23885_v4l2_dev_notify_init(dev);
1969
1970
1971 dev->pci = pci_dev;
1972 if (pci_enable_device(pci_dev)) {
1973 err = -EIO;
1974 goto fail_ctrl;
1975 }
1976
1977 if (cx23885_dev_setup(dev) < 0) {
1978 err = -EINVAL;
1979 goto fail_ctrl;
1980 }
1981
1982
1983 dev->pci_rev = pci_dev->revision;
1984 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
1985 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
1986 "latency: %d, mmio: 0x%llx\n", dev->name,
1987 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
1988 dev->pci_lat,
1989 (unsigned long long)pci_resource_start(pci_dev, 0));
1990
1991 pci_set_master(pci_dev);
1992 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
		printk(KERN_ERR "%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1994 err = -EIO;
1995 goto fail_context;
1996 }
1997
1998 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
1999 if (IS_ERR(dev->alloc_ctx)) {
2000 err = PTR_ERR(dev->alloc_ctx);
2001 goto fail_context;
2002 }
2003 err = request_irq(pci_dev->irq, cx23885_irq,
2004 IRQF_SHARED, dev->name, dev);
2005 if (err < 0) {
2006 printk(KERN_ERR "%s: can't get IRQ %d\n",
2007 dev->name, pci_dev->irq);
2008 goto fail_irq;
2009 }
2010
2011 switch (dev->board) {
2012 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2013 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2014 break;
2015 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2016 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2017 break;
2018 }
2019
	/*
	 * The CX2388[58] IR controller can start firing interrupts when
	 * enabled, so this must only happen after the cx23885_irq() handler
	 * has been hooked up by request_irq() above.
	 */
2025 cx23885_ir_pci_int_enable(dev);
2026 cx23885_input_init(dev);
2027
2028 return 0;
2029
2030fail_irq:
2031 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
2032fail_context:
2033 cx23885_dev_unregister(dev);
2034fail_ctrl:
2035 v4l2_ctrl_handler_free(hdl);
2036 v4l2_device_unregister(&dev->v4l2_dev);
2037fail_free:
2038 kfree(dev);
2039 return err;
2040}
2041
2042static void cx23885_finidev(struct pci_dev *pci_dev)
2043{
2044 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2045 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
2046
2047 cx23885_input_fini(dev);
2048 cx23885_ir_fini(dev);
2049
2050 cx23885_shutdown(dev);
2051
2052
2053 free_irq(pci_dev->irq, dev);
2054
2055 pci_disable_device(pci_dev);
2056
2057 cx23885_dev_unregister(dev);
2058 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
2059 v4l2_ctrl_handler_free(&dev->ctrl_handler);
2060 v4l2_device_unregister(v4l2_dev);
2061 kfree(dev);
2062}
2063
2064static struct pci_device_id cx23885_pci_tbl[] = {
2065 {
		/* CX23885 */
2067 .vendor = 0x14f1,
2068 .device = 0x8852,
2069 .subvendor = PCI_ANY_ID,
2070 .subdevice = PCI_ANY_ID,
2071 }, {
		/* CX23887/8 */
2073 .vendor = 0x14f1,
2074 .device = 0x8880,
2075 .subvendor = PCI_ANY_ID,
2076 .subdevice = PCI_ANY_ID,
2077 }, {
		/* --- end of list --- */
2079 }
2080};
2081MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2082
2083static struct pci_driver cx23885_pci_driver = {
2084 .name = "cx23885",
2085 .id_table = cx23885_pci_tbl,
2086 .probe = cx23885_initdev,
2087 .remove = cx23885_finidev,
2088
2089 .suspend = NULL,
2090 .resume = NULL,
2091};
2092
2093static int __init cx23885_init(void)
2094{
2095 printk(KERN_INFO "cx23885 driver version %s loaded\n",
2096 CX23885_VERSION);
2097 return pci_register_driver(&cx23885_pci_driver);
2098}
2099
2100static void __exit cx23885_fini(void)
2101{
2102 pci_unregister_driver(&cx23885_pci_driver);
2103}
2104
2105module_init(cx23885_init);
2106module_exit(cx23885_fini);
2107