1
2
3
4
5
6
7
8#include "cx23885.h"
9
10#include <linux/init.h>
11#include <linux/list.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/kmod.h>
15#include <linux/kernel.h>
16#include <linux/pci.h>
17#include <linux/slab.h>
18#include <linux/interrupt.h>
19#include <linux/delay.h>
20#include <asm/div64.h>
21#include <linux/firmware.h>
22
23#include "cimax2.h"
24#include "altera-ci.h"
25#include "cx23888-ir.h"
26#include "cx23885-ir.h"
27#include "cx23885-av.h"
28#include "cx23885-input.h"
29
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

/*
 * Module parameters.
 */

/*
 * Periodic RiSC DMA engine reset workaround:
 * 0 = force disable, 1 = driver decides per board (default), 2 = force enable.
 */
static unsigned int dma_reset_workaround = 1;
module_param(dma_reset_workaround, int, 0644);
MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");

/* Debug message verbosity; higher value = more output via dprintk(). */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/* Per-device board type override; UNSET = autodetect from PCI subsystem id. */
static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

/* Debug printk, gated on the 'debug' module parameter level. */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
	} while (0)

/* Number of cx23885 devices probed so far; indexes card[] and names dev. */
static unsigned int cx23885_devcount;

/* Sentinel sync_line value meaning "emit no RISC_RESYNC instruction". */
#define NO_SYNC_LINE (-1U)
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * SRAM memory map for the CX23885/7 bridge: per-DMA-channel command
 * descriptor (CMDS), control queue (IQ/ctrl), cluster descriptor table
 * (CDT) and FIFO locations, plus the matching pointer/count registers.
 * Entries with all-zero addresses are unused channels that get erased by
 * cx23885_sram_channel_setup().  Addresses are chip-specific; do not alter.
 */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		/* Analog video capture */
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		/* Transport stream port B */
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		/*
		 * Transport stream port C.
		 * NOTE(review): this channel reuses the DMA5_* register set
		 * (same as SRAM_CH05) rather than a DMA6_* set — appears to
		 * match the vendor register layout; confirm against datasheet.
		 */
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
195
/*
 * SRAM memory map for the CX23887/8 bridge variant (PCI device 0x8880).
 * Same structure as cx23885_sram_channels but with different SRAM offsets
 * and an additional active VBI channel (SRAM_CH02).  Selected at probe time
 * in cx23885_dev_setup() based on the PCI device id.
 */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		/* Analog video capture */
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		/* VBI capture for VID A */
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		/* Transport stream port B */
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		/*
		 * Transport stream port C.
		 * NOTE(review): reuses the DMA5_* register set (same as
		 * SRAM_CH05), mirroring the cx23885 table — confirm against
		 * the register map.
		 */
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
306
/*
 * Add bits to the software PCI interrupt mask shadow (dev->pci_irqmask)
 * WITHOUT touching the hardware PCI_INT_MSK register.  The bits only take
 * effect when a later cx23885_irq_enable() is called with them.
 */
static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
316
/*
 * Add bits to the software interrupt mask shadow AND enable them in the
 * hardware PCI_INT_MSK register in one atomic step.
 */
void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
327
/*
 * Enable interrupt bits in hardware, restricted to bits previously
 * registered in dev->pci_irqmask; bits never added via cx23885_irq_add*()
 * are silently ignored.
 */
void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	if (v)
		cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
340
/* Enable every interrupt source registered in the software mask shadow. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
345
/*
 * Disable interrupt bits in the hardware PCI_INT_MSK register.  The
 * software shadow (dev->pci_irqmask) is deliberately left untouched so a
 * later cx23885_irq_enable() can re-enable them.
 */
void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
355
/* Mask every interrupt source in hardware (shadow mask is preserved). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
360
/*
 * Permanently remove interrupt bits: clear them in both the software
 * shadow and the hardware PCI_INT_MSK register.
 */
void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
371
/* Read the current hardware interrupt mask under the irqmask lock. */
static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
	return v;
}
383
384static int cx23885_risc_decode(u32 risc)
385{
386 static char *instr[16] = {
387 [RISC_SYNC >> 28] = "sync",
388 [RISC_WRITE >> 28] = "write",
389 [RISC_WRITEC >> 28] = "writec",
390 [RISC_READ >> 28] = "read",
391 [RISC_READC >> 28] = "readc",
392 [RISC_JUMP >> 28] = "jump",
393 [RISC_SKIP >> 28] = "skip",
394 [RISC_WRITERM >> 28] = "writerm",
395 [RISC_WRITECM >> 28] = "writecm",
396 [RISC_WRITECR >> 28] = "writecr",
397 };
398 static int incr[16] = {
399 [RISC_WRITE >> 28] = 3,
400 [RISC_JUMP >> 28] = 3,
401 [RISC_SKIP >> 28] = 1,
402 [RISC_SYNC >> 28] = 1,
403 [RISC_WRITERM >> 28] = 3,
404 [RISC_WRITECM >> 28] = 3,
405 [RISC_WRITECR >> 28] = 4,
406 };
407 static char *bits[] = {
408 "12", "13", "14", "resync",
409 "cnt0", "cnt1", "18", "19",
410 "20", "21", "22", "23",
411 "irq1", "irq2", "eol", "sol",
412 };
413 int i;
414
415 printk(KERN_DEBUG "0x%08x [ %s", risc,
416 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
417 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
418 if (risc & (1 << (i + 12)))
419 pr_cont(" %s", bits[i]);
420 pr_cont(" count=%d ]\n", risc & 0xfff);
421 return incr[risc >> 28] ? incr[risc >> 28] : 1;
422}
423
/*
 * Complete finished buffers on a transport-stream port's DMA queue.
 *
 * @port:  transport port whose queue is serviced
 * @q:     active buffer queue (protected by the caller's locking)
 * @count: hardware general-purpose counter value read by the ISR; compared
 *         against the driver's own q->count modulo 65536 to decide how many
 *         buffers have actually completed
 *
 * Pops up to 5 buffers (max_buf_done guards against an unbounded loop if
 * the counters disagree wildly), timestamps each, bumps the sequence
 * number, and hands it to videobuf2 as DONE.  Stops early when the queue
 * empties or the hardware counter catches up with the driver counter.
 */
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;
	int count_delta;
	int max_buf_done = 5;	/* service maximum 5 buffers */

	do {
		if (list_empty(&q->active))
			return;
		buf = list_entry(q->active.next,
				 struct cx23885_buffer, queue);

		buf->vb.vb2_buf.timestamp = ktime_get_ns();
		buf->vb.sequence = q->count++;
		/* A mismatch between hw and sw counters is worth a louder
		 * debug message (level 1) than the normal path (level 7). */
		if (count != (q->count % 65536)) {
			dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		} else {
			dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		}
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		max_buf_done--;
		/* count register is 16 bit so apply modulo appropriately */
		count_delta = ((int)count - (int)(q->count % 65536));
	} while ((count_delta > 0) && (max_buf_done > 0));
}
453
/*
 * Program the bridge SRAM for one DMA channel.
 *
 * @dev:  bridge device
 * @ch:   SRAM channel description (addresses from the *_sram_channels table)
 * @bpl:  bytes per line; rounded up to a multiple of 8
 * @risc: physical address of the initial RISC program (ignored when the
 *        channel is in jumponly mode)
 *
 * Channels with cmds_start == 0 are unused: their pointer/count registers
 * are zeroed and the function returns.  Otherwise the cluster descriptor
 * table (CDT), the CMDS block and the pointer/count registers are written
 * so the channel is ready for a RISC program.  Always returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	/* Minimal jump-to-self RISC program at SRAM address 8; used as the
	 * initial program counter target for jumponly channels. */
	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT: one 16-byte entry per FIFO line */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS: initial PC, CDT base/size, IQ base/size */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
525
526void cx23885_sram_channel_dump(struct cx23885_dev *dev,
527 struct sram_channel *ch)
528{
529 static char *name[] = {
530 "init risc lo",
531 "init risc hi",
532 "cdt base",
533 "cdt size",
534 "iq base",
535 "iq size",
536 "risc pc lo",
537 "risc pc hi",
538 "iq wr ptr",
539 "iq rd ptr",
540 "cdt current",
541 "pci target lo",
542 "pci target hi",
543 "line / byte",
544 };
545 u32 risc;
546 unsigned int i, j, n;
547
548 pr_warn("%s: %s - dma channel status dump\n",
549 dev->name, ch->name);
550 for (i = 0; i < ARRAY_SIZE(name); i++)
551 pr_warn("%s: cmds: %-15s: 0x%08x\n",
552 dev->name, name[i],
553 cx_read(ch->cmds_start + 4*i));
554
555 for (i = 0; i < 4; i++) {
556 risc = cx_read(ch->cmds_start + 4 * (i + 14));
557 pr_warn("%s: risc%d: ", dev->name, i);
558 cx23885_risc_decode(risc);
559 }
560 for (i = 0; i < (64 >> 2); i += n) {
561 risc = cx_read(ch->ctrl_start + 4 * i);
562
563
564 pr_warn("%s: (0x%08x) iq %x: ", dev->name,
565 ch->ctrl_start + 4 * i, i);
566 n = cx23885_risc_decode(risc);
567 for (j = 1; j < n; j++) {
568 risc = cx_read(ch->ctrl_start + 4 * (i + j));
569 pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
570 dev->name, i+j, risc, j);
571 }
572 }
573
574 pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
575 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
576 pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
577 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
578 pr_warn("%s: ptr1_reg: 0x%08x\n",
579 dev->name, cx_read(ch->ptr1_reg));
580 pr_warn("%s: ptr2_reg: 0x%08x\n",
581 dev->name, cx_read(ch->ptr2_reg));
582 pr_warn("%s: cnt1_reg: 0x%08x\n",
583 dev->name, cx_read(ch->cnt1_reg));
584 pr_warn("%s: cnt2_reg: 0x%08x\n",
585 dev->name, cx_read(ch->cnt2_reg));
586}
587
588static void cx23885_risc_disasm(struct cx23885_tsport *port,
589 struct cx23885_riscmem *risc)
590{
591 struct cx23885_dev *dev = port->dev;
592 unsigned int i, j, n;
593
594 pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
595 dev->name, risc->cpu, (unsigned long)risc->dma);
596 for (i = 0; i < (risc->size >> 2); i += n) {
597 pr_info("%s: %04d: ", dev->name, i);
598 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
599 for (j = 1; j < n; j++)
600 pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
601 dev->name, i + j, risc->cpu[i + j], j);
602 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
603 break;
604 }
605}
606
/*
 * Workaround for stuck bridge DMA transactions.
 *
 * Only runs when dev->need_dma_reset is set (the dma_reset_workaround
 * module parameter / board detection controls that elsewhere).  If both
 * TC_REQ and TC_REQ_SET are non-zero, the pending request bits are written
 * back to acknowledge them and the four video/VBI DMA status registers are
 * read to flush the engines, then the event is logged.
 */
static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
{
	uint32_t reg1_val, reg2_val;

	if (!dev->need_dma_reset)
		return;

	reg1_val = cx_read(TC_REQ);      /* read-only */
	reg2_val = cx_read(TC_REQ_SET);

	if (reg1_val && reg2_val) {
		cx_write(TC_REQ, reg1_val);
		cx_write(TC_REQ_SET, reg2_val);
		/* dummy reads flush the in-flight DMA state */
		cx_read(VID_B_DMA);
		cx_read(VBI_B_DMA);
		cx_read(VID_C_DMA);
		cx_read(VBI_C_DMA);

		dev_info(&dev->pci->dev,
			"dma in progress detected 0x%08x 0x%08x, clearing\n",
			reg1_val, reg2_val);
	}
}
630
/*
 * Quiesce the bridge: stop every DMA engine, the IR block and the UART,
 * then mask all interrupt sources.  Called from cx23885_reset() before
 * reprogramming the SRAM channels.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B/C activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
660
/*
 * Full bridge reset: shut everything down, acknowledge all pending
 * interrupt status, reprogram clock/pad setup and rebuild every SRAM DMA
 * channel, then reinitialise GPIOs.  The SRAM bpl values (720*4 video,
 * 188*4 transport stream, 128 for unused channels) define each channel's
 * line size; unused channels are erased by cx23885_sram_channel_setup().
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Acknowledge (clear) all latched interrupt status */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	/* Preserve only the top bit of CLK_DELAY; magic pad config value
	 * per reference driver — do not change without hardware docs. */
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	msleep(100);

	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
}
700
701
/*
 * Apply PCI quirks needed by the cx23885 bridge.
 *
 * NOTE(review): clearing bit 4 of RDR_TLCTL0 on the 885 bridge appears to
 * be the known tagged-command quirk from the reference driver (not needed
 * on the 887/888) — confirm against the vendor errata.  Always returns 0.
 */
static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	return 0;
}
717
718static int get_resources(struct cx23885_dev *dev)
719{
720 if (request_mem_region(pci_resource_start(dev->pci, 0),
721 pci_resource_len(dev->pci, 0),
722 dev->name))
723 return 0;
724
725 pr_err("%s: can't get MMIO memory @ 0x%llx\n",
726 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
727
728 return -EBUSY;
729}
730
/*
 * Initialise a transport-stream port structure.
 *
 * @dev:    owning bridge device
 * @port:   port structure to initialise (dev->ts1 or dev->ts2)
 * @portno: 1 = port B (VID_B registers, SRAM_CH03),
 *          2 = port C (VID_C registers, SRAM_CH06); any other value BUGs.
 *
 * Sets the default DMA/interrupt/SOP register values, initialises the
 * buffer queue, frontend list and locks, and maps the port number onto the
 * corresponding VID_B/VID_C register set.  Always returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
	struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	/* 0x47 sync byte, 188-byte packets */
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c code
	 * simple; boards may raise it via num_fds_portb/portc
	 * before dvb registration. */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0; /* port C has no source select */
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
801
802static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
803{
804 switch (cx_read(RDR_CFG2) & 0xff) {
805 case 0x00:
806
807 dev->hwrevision = 0xa0;
808 break;
809 case 0x01:
810
811 dev->hwrevision = 0xa1;
812 break;
813 case 0x02:
814
815 dev->hwrevision = 0xb0;
816 break;
817 case 0x03:
818 if (dev->pci->device == 0x8880) {
819
820 dev->hwrevision = 0xc0;
821 } else {
822
823 dev->hwrevision = 0xa4;
824 }
825 break;
826 case 0x04:
827 if (dev->pci->device == 0x8880) {
828
829 dev->hwrevision = 0xd0;
830 } else {
831
832 dev->hwrevision = 0xa5;
833 }
834 break;
835 case 0x0e:
836
837 dev->hwrevision = 0xc0;
838 break;
839 case 0x0f:
840
841 dev->hwrevision = 0xb1;
842 break;
843 default:
844 pr_err("%s() New hardware revision found 0x%x\n",
845 __func__, dev->hwrevision);
846 }
847 if (dev->hwrevision)
848 pr_info("%s() Hardware revision = 0x%02x\n",
849 __func__, dev->hwrevision);
850 else
851 pr_err("%s() Hardware revision unknown 0x%x\n",
852 __func__, dev->hwrevision);
853}
854
855
856struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
857{
858 struct v4l2_subdev *result = NULL;
859 struct v4l2_subdev *sd;
860
861 spin_lock(&dev->v4l2_dev.lock);
862 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
863 if (sd->grp_id == hw) {
864 result = sd;
865 break;
866 }
867 }
868 spin_unlock(&dev->v4l2_dev.lock);
869 return result;
870}
871
/*
 * One-time device bring-up, called from probe.
 *
 * Sequence: init locks and device identity -> select bridge type and SRAM
 * map from the PCI device id -> determine the board (insmod override,
 * then subsystem-id autodetect) -> configure the three I2C buses and the
 * transport ports -> claim and map MMIO -> apply PCI quirks -> reset the
 * bridge -> register I2C adapters, card-specific setup, IR, then the
 * analog/DVB/encoder sub-drivers per board capability.
 *
 * Returns 0 on success or -ENODEV when the MMIO region cannot be claimed.
 * The ordering of steps is significant; do not reorder without care.
 */
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory based on the PCI device id:
	 * 0x8880 is the CX23887/8 family, 0x8852 the CX23885. */
	if (dev->pci->device == 0x8880) {
		/* The 888 has a slightly different memory layout */
		dev->bridge = CX23885_BRIDGE_888;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 50000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config: insmod option first, then PCI subsystem-id table */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	if (dev->pci->device == 0x8852) {
		/* no DIF on cx23885, so no analog tuner support possible:
		 * remap the QuadHD boards to their 885 variants */
		if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
		else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
	}

	/* If the user specific a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
		dev->pci->subsystem_device == 0x7137) {
		/* NOTE(review): this ImpactVCBe variant runs its crystal at
		 * 25 MHz rather than the board default — confirm. */
		dev->clk_freq = 25000000;
	}

	dev->pci_bus  = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
		dev->name, dev->pci->subsystem_vendor,
		dev->pci->subsystem_device, cx23885_boards[dev->board].name,
		dev->board, card[dev->nr] == dev->board ?
		"insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder must be initialised before the bridge reset
	 * when either MPEG port drives an encoder. */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, tuner, standby);
	cx23885_ir_init(dev);

	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
		/*
		 * NOTE(review): GPIOs 8/9 are driven as outputs here —
		 * presumably board-specific audio routing; confirm against
		 * the ViewCast 460e design notes.
		 */
		cx23885_gpio_enable(dev, 0x300, 0);
	}

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			pr_err("%s() Failed to register analog video adapters on VID_A\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			pr_err("%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			pr_err("%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}
1100
/*
 * Tear down a device on last release.
 *
 * The MMIO region is released unconditionally, but the sub-driver
 * unregistration and iounmap only run when the reference count drops to
 * zero.  Unregistration is done in reverse order of registration in
 * cx23885_dev_setup().
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1130
/*
 * Emit the RISC program for one video field over a scatter-gather list.
 *
 * @rp:        write cursor into the RISC instruction buffer
 * @sglist:    scatter-gather list describing the destination pages
 * @offset:    byte offset of this field's first line within the buffer
 * @sync_line: line number for an initial RISC_RESYNC, or NO_SYNC_LINE
 * @bpl:       bytes per line
 * @padding:   bytes to skip between lines
 * @lines:     number of lines in the field
 * @lpi:       lines per IRQ; every lpi-th line raises IRQ1 and bumps the
 *             counter (0 disables per-line interrupts)
 * @jump:      prepend a 3-word placeholder RISC_JUMP (patched by the
 *             caller to chain programs)
 *
 * Lines that fit in the current SG segment become a single RISC_WRITE
 * with SOL|EOL; lines that straddle segments are split into a SOL write,
 * zero or more middle writes, and a final EOL write.  Returns the
 * advanced write cursor.
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;

	if (jump) {
		/* placeholder jump; target words filled in by the caller */
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0);
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* advance to the SG segment containing 'offset' */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split across chunks */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1197
/*
 * Build a RISC DMA program for an interlaced video buffer.
 *
 * @pci:           PCI device for coherent allocation
 * @risc:          riscmem descriptor filled in (cpu/dma/size/jmp)
 * @sglist:        destination scatter-gather list
 * @top_offset:    byte offset of the top field, or UNSET to skip it
 * @bottom_offset: byte offset of the bottom field, or UNSET to skip it
 * @bpl:           bytes per line
 * @padding:       bytes skipped between lines
 * @lines:         lines per field
 *
 * Estimates the worst-case instruction count (one extra write per line
 * for a possible page crossing, plus sync/jump overhead), allocates the
 * coherent buffer and emits one field program per requested field via
 * cx23885_risc_field().  risc->jmp points at the end of the program where
 * the caller appends the chaining jump.  Returns 0 or -ENOMEM.
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1239
/*
 * Build a RISC DMA program for a progressive (non-interlaced) data buffer,
 * e.g. a transport-stream capture buffer.
 *
 * @pci:    PCI device for coherent allocation
 * @risc:   riscmem descriptor filled in (cpu/dma/size/jmp)
 * @sglist: destination scatter-gather list
 * @bpl:    bytes per line
 * @lines:  number of lines
 * @lpi:    lines per IRQ (see cx23885_risc_field()); when 0 a placeholder
 *          jump is emitted at the start instead
 *
 * Same worst-case sizing logic as cx23885_risc_buffer() but for a single
 * "field" with no padding and no RESYNC.  Returns 0 or -ENOMEM.
 */
int cx23885_risc_databuffer(struct pci_dev *pci,
			    struct cx23885_riscmem *risc,
			    struct scatterlist *sglist,
			    unsigned int bpl,
			    unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Here
	   there is no padding and no sync.  First DMA region may be smaller
	   than PAGE_SIZE */
	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 4;

	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines, lpi, lpi == 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1272
/*
 * Build a RISC DMA program for a VBI buffer.  Identical in structure to
 * cx23885_risc_buffer(): up to two fields (top/bottom, selected by
 * passing an offset != UNSET), bottom field emitted with sync line
 * 0x200.  Returns 0 or -ENOMEM.
 */
int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/*
	 * Worst-case instruction estimate: one write per scan line plus
	 * one per page border, per field, plus slack (+5) for syncs and
	 * the trailing jump; 12 bytes per instruction.
	 */
	instructions = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	rp = risc->cpu;

	/* write risc instructions */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address; the caller writes the
	 * actual jump there (see cx23885_buf_queue) */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1319
1320
1321void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
1322{
1323 struct cx23885_riscmem *risc = &buf->risc;
1324
1325 BUG_ON(in_interrupt());
1326 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
1327}
1328
/*
 * Dump the bridge and per-port transport-stream registers at debug
 * level 1.  Purely diagnostic; called from cx23885_start_dma() when
 * debug > 4.  Reads registers but changes no state.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	/* Per-port registers below; their addresses are stored in the
	 * tsport struct since they differ between TS1 and TS2. */
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	/* Not every port has a source-select register */
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
	dprintk(1, "%s() ts_int_status(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_stat, cx_read(port->reg_ts_int_stat));
	dprintk(1, "%s() PCI_INT_STAT 0x%08X\n", __func__,
		cx_read(PCI_INT_STAT));
	dprintk(1, "%s() VID_B_INT_MSTAT 0x%08X\n", __func__,
		cx_read(VID_B_INT_MSTAT));
	dprintk(1, "%s() VID_B_INT_SSTAT 0x%08X\n", __func__,
		cx_read(VID_B_INT_SSTAT));
	dprintk(1, "%s() VID_C_INT_MSTAT 0x%08X\n", __func__,
		cx_read(VID_C_INT_MSTAT));
	dprintk(1, "%s() VID_C_INT_SSTAT 0x%08X\n", __func__,
		cx_read(VID_C_INT_SSTAT));
}
1392
/*
 * Program and start transport-stream DMA on @port for the given buffer.
 *
 * The register write ordering and the udelay()s below are deliberate
 * hardware bring-up sequencing — do not reorder.  Returns 0 on success
 * or -EINVAL if neither port B nor port C of this board is configured
 * for DVB.
 */
int cx23885_start_dma(struct cx23885_tsport *port,
		      struct cx23885_dmaqueue *q,
		      struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* Clear any pending bridge errors before touching the port */
	cx23885_clear_bridge_error(dev);

	/* Stop the risc dma engine on this port before reprogramming */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* Point the port's SRAM channel at this buffer's RISC program */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* Write TS packet length to the chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
	    (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	/* Encoder boards: gate the AV core clock off while reconfiguring */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* Configure the TS source mux, if this port has one */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* Reset and start the general-purpose (packet) counter; the ISR
	 * reads it to see how many buffers completed */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Route TS pins for a DVB configuration on port B */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3;
		cx_write(PAD_CTRL, reg);
	}

	/* Route TS pins for a DVB configuration on port C */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4;
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		/* Encoder routing: clear bit 0, set bits 1 and 3 of
		 * PAD_CTRL (mirrored/undone in cx23885_stop_dma) */
		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;

		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* NOTE(review): the exact bit semantics of CLK_DELAY and
		 * ALT_PIN_OUT_SEL below come from the hardware reference;
		 * treat the magic values as chip-specific configuration */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);

		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		break;
	default:
		BUG();
	}

	/* Kick the master enable bit */
	cx_set(DEV_CNTRL2, (1<<5));

	cx23885_clear_bridge_error(dev);

	/* Re-enable the AV core clock for encoder boards */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	return 0;
}
1527
/*
 * Stop transport-stream DMA on @port: mask the port's TS interrupts,
 * disable the RISC DMA engine, wait for outstanding PCI transaction
 * requests to drain, then undo the encoder pin routing performed by
 * cx23885_start_dma().  Always returns 0.
 */
static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;
	int delay = 0;
	uint32_t reg1_val;
	uint32_t reg2_val;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* Wait (20ms plus up to another 100ms) for the bridge's pending
	 * transaction-request registers to clear.
	 * NOTE(review): the exit condition uses '||', so the loop ends as
	 * soon as EITHER TC_REQ or TC_REQ_SET reads zero — confirm against
	 * the hardware reference whether both were meant to be zero ('&&')
	 * before changing it.
	 */
	mdelay(20);
	for (delay = 0; delay < 100; delay++) {
		reg1_val = cx_read(TC_REQ);
		reg2_val = cx_read(TC_REQ_SET);
		if (reg1_val == 0 || reg2_val == 0)
			break;
		mdelay(1);
	}
	dev_dbg(&dev->pci->dev, "delay=%d reg1=0x%08x reg2=0x%08x\n",
		delay, reg1_val, reg2_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		reg = cx_read(PAD_CTRL);

		/* Mirror of cx23885_start_dma(): set bit 0 again ... */
		reg = reg | 0x1;

		/* ... and clear bits 1 and 3 */
		reg = reg & ~0xa;
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);
	}

	/* Gate the AV core clock off for encoder boards */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	return 0;
}
1571
1572
1573
1574int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1575{
1576 struct cx23885_dev *dev = port->dev;
1577 int size = port->ts_packet_size * port->ts_packet_count;
1578 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
1579
1580 dprintk(1, "%s: %p\n", __func__, buf);
1581 if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
1582 return -EINVAL;
1583 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1584
1585 cx23885_risc_databuffer(dev->pci, &buf->risc,
1586 sgt->sgl,
1587 port->ts_packet_size, port->ts_packet_count, 0);
1588 return 0;
1589}
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
/*
 * Queue a prepared buffer onto the port's active DMA list.
 *
 * Each buffer's RISC program ends in a jump (risc.jmp).  Initially both
 * the program's second dword (cpu[1]) and the jump target (jmp[1]) are
 * pointed back at risc.dma + 12, so a lone buffer loops on itself with
 * RISC_CNT_INC bumping the packet counter each pass.  When a buffer is
 * appended, the previous tail's jump is re-targeted at the new buffer's
 * program, and RISC_IRQ1 is set on the new buffer's first instruction so
 * its completion raises an interrupt.  All list/link manipulation is
 * done under dev->slock so it is safe against the ISR.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;
	unsigned long flags;

	/* Self-loop: program jumps back to its own start (dma + 12) */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0);

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		/* Interrupt on completion of this (new tail) buffer */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		/* Re-target the old tail's jump at the new buffer */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
1640
1641
1642
1643static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1644{
1645 struct cx23885_dmaqueue *q = &port->mpegq;
1646 struct cx23885_buffer *buf;
1647 unsigned long flags;
1648
1649 spin_lock_irqsave(&port->slock, flags);
1650 while (!list_empty(&q->active)) {
1651 buf = list_entry(q->active.next, struct cx23885_buffer,
1652 queue);
1653 list_del(&buf->queue);
1654 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1655 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1656 buf, buf->vb.vb2_buf.index, reason,
1657 (unsigned long)buf->risc.dma);
1658 }
1659 spin_unlock_irqrestore(&port->slock, flags);
1660}
1661
/*
 * Stop DMA on @port and fail out every queued buffer.  Used on stream
 * stop / teardown paths.
 */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1668
/*
 * Interrupt handler for the 417 (MPEG encoder) path on TS1.
 *
 * On any error bit: logs the condition, stops the port's DMA, dumps the
 * SRAM channel and pokes the encoder health check.  On RISCI1 (buffer
 * complete): wakes up the waiting buffer.  Acks whatever status bits
 * were set and returns non-zero if anything was handled.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here; the encoder is always on TS1 */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT)         ||
		(status & VID_B_MSK_OPC_ERR)     ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC)        ||
		(status & VID_B_MSK_VBI_SYNC)    ||
		(status & VID_B_MSK_OF)          ||
		(status & VID_B_MSK_VBI_OF)) {
		pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
		       dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		/* Stop DMA and leave diagnostics; no automatic restart */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* Write-1-to-clear ack of the handled status bits */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1724
/*
 * Interrupt handler for a DVB transport-stream port (TS1 or TS2).
 *
 * Error bits stop the port's DMA and dump the SRAM channel for
 * diagnosis; RISCI1 wakes up the completed buffer.  Status bits are
 * acked by writing them back; returns non-zero if anything was handled.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		pr_err("%s: mpeg risc op code error\n", dev->name);

		/* Stop DMA; the stream must be restarted by the caller */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* Write-1-to-clear ack of the handled status bits */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1775
/*
 * Top-level (shared) PCI interrupt handler.
 *
 * Reads the master PCI status, then fans out to the sub-handlers:
 * CI slots (NetUP/Altera), TS ports (DVB or encoder), analog video,
 * audio, the IR subdevice, and the AV core (deferred to a workqueue).
 * Returns IRQ_HANDLED iff at least one sub-handler claimed something.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	/* Fast exit for interrupts that are not ours (shared IRQ line) */
	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	if ((pci_status & pci_mask) == 0) {
		dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
			pci_status, pci_mask);
		goto out;
	}

	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	if (((pci_status & pci_mask) == 0) &&
		((ts2_status & ts2_mask) == 0) &&
		((ts1_status & ts1_mask) == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* Debug trace of each asserted master status bit */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CI slot status changes signalled over GPIO pins */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* AV core IRQs are masked here and serviced from process context */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status & pci_mask);
out:
	return IRQ_RETVAL(handled);
}
1940
1941static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1942 unsigned int notification, void *arg)
1943{
1944 struct cx23885_dev *dev;
1945
1946 if (sd == NULL)
1947 return;
1948
1949 dev = to_cx23885(sd->v4l2_dev);
1950
1951 switch (notification) {
1952 case V4L2_SUBDEV_IR_RX_NOTIFY:
1953 if (sd == dev->sd_ir)
1954 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1955 break;
1956 case V4L2_SUBDEV_IR_TX_NOTIFY:
1957 if (sd == dev->sd_ir)
1958 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1959 break;
1960 }
1961}
1962
/*
 * Set up the work handlers and v4l2 notify hook used to service
 * subdevice notifications (AV core and IR) outside interrupt context.
 */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1970
/* True if this board has its MPEG encoder wired to TS port B. */
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}
1975
/* True if this board has its MPEG encoder wired to TS port C. */
static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1994{
1995 if (mask & 0x7)
1996 cx_set(GP0_IO, mask & 0x7);
1997
1998 if (mask & 0x0007fff8) {
1999 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2000 pr_err("%s: Setting GPIO on encoder ports\n",
2001 dev->name);
2002 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
2003 }
2004
2005
2006 if (mask & 0x00f80000)
2007 pr_info("%s: Unsupported\n", dev->name);
2008}
2009
2010void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
2011{
2012 if (mask & 0x00000007)
2013 cx_clear(GP0_IO, mask & 0x7);
2014
2015 if (mask & 0x0007fff8) {
2016 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2017 pr_err("%s: Clearing GPIO moving on encoder ports\n",
2018 dev->name);
2019 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
2020 }
2021
2022
2023 if (mask & 0x00f80000)
2024 pr_info("%s: Unsupported\n", dev->name);
2025}
2026
/*
 * Read the GPIO bits selected by @mask.
 *
 * Note the early return: when any of bits 0-2 are requested, ONLY the
 * GP0_IO bits are read and any MC417 bits in the same mask are ignored.
 * Callers must query the two ranges separately.  Bits 19-23 are not
 * supported and read as 0.
 */
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Reading GPIO moving on encoder ports\n",
			       dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);

	return 0;
}
2045
2046void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
2047{
2048 if ((mask & 0x00000007) && asoutput)
2049 cx_set(GP0_IO, (mask & 0x7) << 16);
2050 else if ((mask & 0x00000007) && !asoutput)
2051 cx_clear(GP0_IO, (mask & 0x7) << 16);
2052
2053 if (mask & 0x0007fff8) {
2054 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2055 pr_err("%s: Enabling GPIO on encoder ports\n",
2056 dev->name);
2057 }
2058
2059
2060 if ((mask & 0x0007fff8) && asoutput)
2061 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2062
2063 else if ((mask & 0x0007fff8) && !asoutput)
2064 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2065
2066
2067}
2068
/*
 * PCI IDs of host bridges known to require the periodic RiSC DMA reset
 * workaround (see cx23885_does_need_dma_reset).
 */
static struct {
	int vendor, dev;
} const broken_dev_id[] = {
	/*
	 * 0x1022:0x1451 — presumably the IOMMU/root-complex function of
	 * AMD Ryzen (family 17h) platforms; confirm against pci.ids
	 * before extending this list.
	 */
	{ PCI_VENDOR_ID_AMD, 0x1451 },
};
2078
2079static bool cx23885_does_need_dma_reset(void)
2080{
2081 int i;
2082 struct pci_dev *pdev = NULL;
2083
2084 if (dma_reset_workaround == 0)
2085 return false;
2086 else if (dma_reset_workaround == 2)
2087 return true;
2088
2089 for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
2090 pdev = pci_get_device(broken_dev_id[i].vendor,
2091 broken_dev_id[i].dev, NULL);
2092 if (pdev) {
2093 pci_dev_put(pdev);
2094 return true;
2095 }
2096 }
2097 return false;
2098}
2099
2100static int cx23885_initdev(struct pci_dev *pci_dev,
2101 const struct pci_device_id *pci_id)
2102{
2103 struct cx23885_dev *dev;
2104 struct v4l2_ctrl_handler *hdl;
2105 int err;
2106
2107 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2108 if (NULL == dev)
2109 return -ENOMEM;
2110
2111 dev->need_dma_reset = cx23885_does_need_dma_reset();
2112
2113 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2114 if (err < 0)
2115 goto fail_free;
2116
2117 hdl = &dev->ctrl_handler;
2118 v4l2_ctrl_handler_init(hdl, 6);
2119 if (hdl->error) {
2120 err = hdl->error;
2121 goto fail_ctrl;
2122 }
2123 dev->v4l2_dev.ctrl_handler = hdl;
2124
2125
2126 cx23885_v4l2_dev_notify_init(dev);
2127
2128
2129 dev->pci = pci_dev;
2130 if (pci_enable_device(pci_dev)) {
2131 err = -EIO;
2132 goto fail_ctrl;
2133 }
2134
2135 if (cx23885_dev_setup(dev) < 0) {
2136 err = -EINVAL;
2137 goto fail_ctrl;
2138 }
2139
2140
2141 dev->pci_rev = pci_dev->revision;
2142 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
2143 pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
2144 dev->name,
2145 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2146 dev->pci_lat,
2147 (unsigned long long)pci_resource_start(pci_dev, 0));
2148
2149 pci_set_master(pci_dev);
2150 err = pci_set_dma_mask(pci_dev, 0xffffffff);
2151 if (err) {
2152 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2153 goto fail_ctrl;
2154 }
2155
2156 err = request_irq(pci_dev->irq, cx23885_irq,
2157 IRQF_SHARED, dev->name, dev);
2158 if (err < 0) {
2159 pr_err("%s: can't get IRQ %d\n",
2160 dev->name, pci_dev->irq);
2161 goto fail_irq;
2162 }
2163
2164 switch (dev->board) {
2165 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2166 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2167 break;
2168 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2169 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2170 break;
2171 }
2172
2173
2174
2175
2176
2177
2178 cx23885_ir_pci_int_enable(dev);
2179 cx23885_input_init(dev);
2180
2181 return 0;
2182
2183fail_irq:
2184 cx23885_dev_unregister(dev);
2185fail_ctrl:
2186 v4l2_ctrl_handler_free(hdl);
2187 v4l2_device_unregister(&dev->v4l2_dev);
2188fail_free:
2189 kfree(dev);
2190 return err;
2191}
2192
/*
 * PCI remove: tear down in reverse probe order — input/IR first (they
 * can raise interrupts), then quiesce the hardware, release the IRQ,
 * disable the PCI device, and free all driver state.
 */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2213
static const struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 (Conexant, vendor 0x14f1) */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887/CX23888 — presumably; confirm exact part */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
2231MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2232
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO: power management — suspend/resume are not implemented */
	.suspend  = NULL,
	.resume   = NULL,
};
2242
/* Module entry point: announce the version and register the driver. */
static int __init cx23885_init(void)
{
	pr_info("cx23885 driver version %s loaded\n",
		CX23885_VERSION);
	return pci_register_driver(&cx23885_pci_driver);
}
2249
/* Module exit point: unregister the PCI driver. */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2254
2255module_init(cx23885_init);
2256module_exit(cx23885_fini);
2257