/*
 * c8sectpfe-core.c - C8SECTPFE STi DVB driver
 *
 * Copyright (c) STMicroelectronics 2015
 *
 *   Author: Peter Bennett <peter.bennett@st.com>
 *           Peter Griffin <peter.griffin@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */
14#include <linux/atomic.h>
15#include <linux/clk.h>
16#include <linux/completion.h>
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dvb/dmx.h>
21#include <linux/dvb/frontend.h>
22#include <linux/errno.h>
23#include <linux/firmware.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/of_gpio.h>
29#include <linux/of_platform.h>
30#include <linux/platform_device.h>
31#include <linux/usb.h>
32#include <linux/slab.h>
33#include <linux/time.h>
34#include <linux/version.h>
35#include <linux/wait.h>
36#include <linux/pinctrl/pinctrl.h>
37
38#include "c8sectpfe-core.h"
39#include "c8sectpfe-common.h"
40#include "c8sectpfe-debugfs.h"
41#include "dmxdev.h"
42#include "dvb_demux.h"
43#include "dvb_frontend.h"
44#include "dvb_net.h"
45
46#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
47MODULE_FIRMWARE(FIRMWARE_MEMDMA);
48
49#define PID_TABLE_SIZE 1024
50#define POLL_MSECS 50
51
52static int load_c8sectpfe_fw(struct c8sectpfei *fei);
53
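/*
 * Each TS packet is stored in a PACKET_SIZE (188 + 4 byte) slot of the
 * DMA back buffer, which holds 8 * 340 such slots per channel.
 */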
54#define TS_PKT_SIZE 188
55#define HEADER_SIZE (4)
56#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
57
58#define FEI_ALIGNMENT (32)
59
60#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
61
62#define FIFO_LEN 1024
63
64static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
65{
66 struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
67 struct channel_info *channel;
68 int chan_num;
69
70
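	/* Poll each input block channel and kick its tasklet if the TP is enabled */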
71 for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
72 channel = fei->channel_data[chan_num];
73
74
75 if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
76 tasklet_schedule(&channel->tsklet);
77 }
78
79 fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
80 add_timer(&fei->timer);
81}
82
83static void channel_swdemux_tsklet(unsigned long data)
84{
	struct channel_info *channel = (struct channel_info *)data;
	struct c8sectpfei *fei;
	unsigned long wp, rp;
	int pos, num_packets, n, size;
	u8 *buf;

	if (unlikely(!channel || !channel->irec))
		return;

	fei = channel->fei;
93
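	/* Snapshot the DMA write and read pointers for this channel's back buffer */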
94 wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
95 rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
96
97 pos = rp - channel->back_buffer_busaddr;
98
99
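	/* Has the write pointer wrapped back to the start of the buffer? */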
100 if (wp < rp)
101 wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;
102
103 size = wp - rp;
104 num_packets = size / PACKET_SIZE;
105
106
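	/* Make the DMA'd region visible to the CPU before filtering it */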
107 dma_sync_single_for_cpu(fei->dev,
108 rp,
109 size,
110 DMA_FROM_DEVICE);
111
112 buf = (u8 *) channel->back_buffer_aligned;
113
114 dev_dbg(fei->dev,
115 "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\t"
116 "rp=0x%lx, wp=0x%lx\n",
117 channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
118
119 for (n = 0; n < num_packets; n++) {
120 dvb_dmx_swfilter_packets(
121 &fei->c8sectpfe[0]->
122 demux[channel->demux_mapping].dvb_demux,
123 &buf[pos], 1);
124
125 pos += PACKET_SIZE;
126 }
127
128
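	/* Advance the DMA read pointer, wrapping to the buffer start if needed */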
129 if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
130 writel(channel->back_buffer_busaddr, channel->irec +
131 DMA_PRDS_BUSRP_TP(0));
132 else
133 writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
134}
135
136static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
137{
138 struct dvb_demux *demux = dvbdmxfeed->demux;
139 struct stdemux *stdemux = (struct stdemux *)demux->priv;
140 struct c8sectpfei *fei = stdemux->c8sectpfei;
141 struct channel_info *channel;
142 u32 tmp;
143 unsigned long *bitmap;
144 int ret;
145
146 switch (dvbdmxfeed->type) {
147 case DMX_TYPE_TS:
148 break;
149 case DMX_TYPE_SEC:
150 break;
151 default:
152 dev_err(fei->dev, "%s:%d Error bailing\n"
153 , __func__, __LINE__);
154 return -EINVAL;
155 }
156
157 if (dvbdmxfeed->type == DMX_TYPE_TS) {
158 switch (dvbdmxfeed->pes_type) {
159 case DMX_PES_VIDEO:
160 case DMX_PES_AUDIO:
161 case DMX_PES_TELETEXT:
162 case DMX_PES_PCR:
163 case DMX_PES_OTHER:
164 break;
165 default:
166 dev_err(fei->dev, "%s:%d Error bailing\n"
167 , __func__, __LINE__);
168 return -EINVAL;
169 }
170 }
171
172 if (!atomic_read(&fei->fw_loaded)) {
173 ret = load_c8sectpfe_fw(fei);
174 if (ret)
175 return ret;
176 }
177
178 mutex_lock(&fei->lock);
179
180 channel = fei->channel_data[stdemux->tsin_index];
181
182 bitmap = (unsigned long *) channel->pid_buffer_aligned;
183
184
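	/* PID 8192 means "pass all PIDs": disable hardware PID filtering entirely */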
185 if (dvbdmxfeed->pid == 8192) {
186 tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
187 tmp &= ~C8SECTPFE_PID_ENABLE;
188 writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
189
190 } else {
191 bitmap_set(bitmap, dvbdmxfeed->pid, 1);
192 }
193
194
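	/* Push the updated PID bitmap out to the hardware */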
195 dma_sync_single_for_device(fei->dev,
196 channel->pid_buffer_busaddr,
197 PID_TABLE_SIZE,
198 DMA_TO_DEVICE);
199
200 channel->active = 1;
201
202 if (fei->global_feed_count == 0) {
		fei->timer.expires = jiffies +
			msecs_to_jiffies(POLL_MSECS);
205
206 add_timer(&fei->timer);
207 }
208
209 if (stdemux->running_feed_count == 0) {
210
211 dev_dbg(fei->dev, "Starting channel=%p\n", channel);
212
213 tasklet_init(&channel->tsklet, channel_swdemux_tsklet,
214 (unsigned long) channel);
215
216
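		/* Initialise the input block FIFO pointers for this channel */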
217 writel(channel->fifo,
218 fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
219 writel(channel->fifo + FIFO_LEN - 1,
220 fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
221
222 writel(channel->fifo,
223 fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
224 writel(channel->fifo,
225 fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
226
227
228
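		/* Program the DMA pointer records with the back buffer bus addresses */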
229 writel(channel->back_buffer_busaddr, channel->irec +
230 DMA_PRDS_BUSBASE_TP(0));
231
232 tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
233 writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
234
235 writel(channel->back_buffer_busaddr, channel->irec +
236 DMA_PRDS_BUSWP_TP(0));
237
238
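		/* Enable and reset the input block */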
239 writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
240 , fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
241
242
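		/* Enable the transport processor DMA for this channel */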
243 writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
244
245 dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
246 , __func__, __LINE__, stdemux);
247 }
248
249 stdemux->running_feed_count++;
250 fei->global_feed_count++;
251
252 mutex_unlock(&fei->lock);
253
254 return 0;
255}
256
257static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
258{
259
260 struct dvb_demux *demux = dvbdmxfeed->demux;
261 struct stdemux *stdemux = (struct stdemux *)demux->priv;
262 struct c8sectpfei *fei = stdemux->c8sectpfei;
263 struct channel_info *channel;
264 int idlereq;
265 u32 tmp;
266 int ret;
267 unsigned long *bitmap;
268
269 if (!atomic_read(&fei->fw_loaded)) {
270 ret = load_c8sectpfe_fw(fei);
271 if (ret)
272 return ret;
273 }
274
275 mutex_lock(&fei->lock);
276
277 channel = fei->channel_data[stdemux->tsin_index];
278
279 bitmap = (unsigned long *) channel->pid_buffer_aligned;
280
281 if (dvbdmxfeed->pid == 8192) {
282 tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
283 tmp |= C8SECTPFE_PID_ENABLE;
284 writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
285 } else {
286 bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
287 }
288
289
290 dma_sync_single_for_device(fei->dev,
291 channel->pid_buffer_busaddr,
292 PID_TABLE_SIZE,
293 DMA_TO_DEVICE);
294
295 if (--stdemux->running_feed_count == 0) {
296
297 channel = fei->channel_data[stdemux->tsin_index];
298
299
300
301
302 writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
303
304
305 writel(0, channel->irec + DMA_PRDS_TPENABLE);
306
307 tasklet_disable(&channel->tsklet);
308
309
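		/* Ask the DMA engine to idle this channel and wait for the idle interrupt */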
310 idlereq = (1 << channel->tsin_id) | IDLEREQ;
311 writel(idlereq, fei->io + DMA_IDLE_REQ);
312
313
314 ret = wait_for_completion_timeout(&channel->idle_completion,
315 msecs_to_jiffies(100));
316
317 if (ret == 0)
318 dev_warn(fei->dev,
319 "Timeout waiting for idle irq on tsin%d\n",
320 channel->tsin_id);
321
322 reinit_completion(&channel->idle_completion);
323
324
325
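		/* Reset the DMA pointer records back to the start of the back buffer */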
326 writel(channel->back_buffer_busaddr,
327 channel->irec + DMA_PRDS_BUSBASE_TP(0));
328
329 tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
330 writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
331
332 writel(channel->back_buffer_busaddr,
333 channel->irec + DMA_PRDS_BUSWP_TP(0));
334
335 dev_dbg(fei->dev,
336 "%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
337 __func__, __LINE__, stdemux, channel->tsin_id);
338
339
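		/* Clear the PID bitmap and sync it to the hardware */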
340 memset((void *)channel->pid_buffer_aligned
341 , 0x00, PID_TABLE_SIZE);
342
343
344 dma_sync_single_for_device(fei->dev,
345 channel->pid_buffer_busaddr,
346 PID_TABLE_SIZE,
347 DMA_TO_DEVICE);
348
349 channel->active = 0;
350 }
351
352 if (--fei->global_feed_count == 0) {
353 dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
354 , __func__, __LINE__, fei->global_feed_count);
355
356 del_timer(&fei->timer);
357 }
358
359 mutex_unlock(&fei->lock);
360
361 return 0;
362}
363
364static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
365{
366 int i;
367
368 for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
369 if (!fei->channel_data[i])
370 continue;
371
372 if (fei->channel_data[i]->tsin_id == tsin_num)
373 return fei->channel_data[i];
374 }
375
376 return NULL;
377}
378
379static void c8sectpfe_getconfig(struct c8sectpfei *fei)
380{
381 struct c8sectpfe_hw *hw = &fei->hw_stats;
382
383 hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
384 hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
385 hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
386 hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
387 hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
388 hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
389 hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
390
391 dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
392 dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
393 dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
394 dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
395 , hw->num_swts);
396 dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
397 dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
398 dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
399 dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
400 , hw->num_tp);
401}
402
403static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
404{
405 struct c8sectpfei *fei = priv;
406 struct channel_info *chan;
407 int bit;
408 unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
409
410
411
412
413
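	/* Complete the idle_completion of every channel whose idle request was acknowledged */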
414 for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
415
416 chan = find_channel(fei, bit);
417
418 if (chan)
419 complete(&chan->idle_completion);
420 }
421
422 writel(0, fei->io + DMA_IDLE_REQ);
423
424 return IRQ_HANDLED;
425}
426
427
428static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
429{
430 if (!fei || !tsin)
431 return;
432
433 if (tsin->back_buffer_busaddr)
434 if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
435 dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
436 FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
437
438 kfree(tsin->back_buffer_start);
439
440 if (tsin->pid_buffer_busaddr)
441 if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
442 dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
443 PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
444
445 kfree(tsin->pid_buffer_start);
446}
447
448#define MAX_NAME 20
449
450static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
451 struct channel_info *tsin)
452{
453 int ret;
454 u32 tmp;
455 char tsin_pin_name[MAX_NAME];
456
457 if (!fei || !tsin)
458 return -EINVAL;
459
460 dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
461 , __func__, __LINE__, tsin, tsin->tsin_id);
462
463 init_completion(&tsin->idle_completion);
464
465 tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE +
466 FEI_ALIGNMENT, GFP_KERNEL);
467
468 if (!tsin->back_buffer_start) {
469 ret = -ENOMEM;
470 goto err_unmap;
471 }
472
473
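	/* Align the back buffer on a FEI_ALIGNMENT (32 byte) boundary */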
474 tsin->back_buffer_aligned = tsin->back_buffer_start
475 + FEI_ALIGNMENT;
476
477 tsin->back_buffer_aligned = (void *)
478 (((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);
479
480 tsin->back_buffer_busaddr = dma_map_single(fei->dev,
481 (void *)tsin->back_buffer_aligned,
482 FEI_BUFFER_SIZE,
483 DMA_BIDIRECTIONAL);
484
485 if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
486 dev_err(fei->dev, "failed to map back_buffer\n");
487 ret = -EFAULT;
488 goto err_unmap;
489 }
490
491
492
493
494
495
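	/* Allocate the PID filter table: one bit per PID, 8192 PIDs = 1 KiB */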
496 tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);
497
498 if (!tsin->pid_buffer_start) {
499 ret = -ENOMEM;
500 goto err_unmap;
501 }
502
503
504
505
506
507
508
509
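	/* Align the PID table on a 1024 byte boundary (its own size) */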
510 tsin->pid_buffer_aligned = tsin->pid_buffer_start +
511 PID_TABLE_SIZE;
512
513 tsin->pid_buffer_aligned = (void *)
514 (((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);
515
516 tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
517 tsin->pid_buffer_aligned,
518 PID_TABLE_SIZE,
519 DMA_BIDIRECTIONAL);
520
521 if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
522 dev_err(fei->dev, "failed to map pid_bitmap\n");
523 ret = -EFAULT;
524 goto err_unmap;
525 }
526
527
528 dma_sync_single_for_device(fei->dev,
529 tsin->pid_buffer_busaddr,
530 PID_TABLE_SIZE,
531 DMA_TO_DEVICE);
532
533 snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
534 (tsin->serial_not_parallel ? "serial" : "parallel"));
535
536 tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
537 if (IS_ERR(tsin->pstate)) {
538 dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
539 , __func__, tsin_pin_name);
540 ret = PTR_ERR(tsin->pstate);
541 goto err_unmap;
542 }
543
544 ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);
545
546 if (ret) {
547 dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
548 , __func__);
549 goto err_unmap;
550 }
551
552
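	/* Enable this input block's clock */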
553 tmp = readl(fei->io + SYS_INPUT_CLKEN);
554 tmp |= BIT(tsin->tsin_id);
555 writel(tmp, fei->io + SYS_INPUT_CLKEN);
556
557 if (tsin->serial_not_parallel)
558 tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;
559
560 if (tsin->invert_ts_clk)
561 tmp |= C8SECTPFE_INVERT_TSCLK;
562
563 if (tsin->async_not_sync)
564 tmp |= C8SECTPFE_ASYNC_NOT_SYNC;
565
566 tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;
567
568 writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));
569
570 writel(C8SECTPFE_SYNC(0x9) |
571 C8SECTPFE_DROP(0x9) |
572 C8SECTPFE_TOKEN(0x47),
573 fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));
574
575 writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));
576
577
578
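	/* Each input block gets its own FIFO_LEN-sized window, indexed by tsin_id */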
579 tsin->fifo = (tsin->tsin_id * FIFO_LEN);
580
581 writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
582 writel(tsin->fifo + FIFO_LEN - 1,
583 fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));
584
585 writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
586 writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));
587
588 writel(tsin->pid_buffer_busaddr,
589 fei->io + PIDF_BASE(tsin->tsin_id));
590
591 dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
592 tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
593 &tsin->pid_buffer_busaddr);
594
595
596
597
598
599
600
601
602
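	/* Enable HW PID filtering: 13-bit PID field at a 40-bit offset into the packet */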
603 tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
604 | C8SECTPFE_PID_OFFSET(40));
605
606 writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));
607
608 dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
609 tsin->tsin_id,
610 readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
611 readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
612 readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
613 readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));
614
615
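	/* Locate this channel's DMA pointer record block inside the MEMDMA DMEM */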
616 tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
617 readl(fei->io + DMA_PTRREC_BASE);
618
619
620
621
622 tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);
623
624 writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);
625
626 writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);
627
	writel((TS_PKT_SIZE + 7) & ~7, tsin->irec + DMA_PRDS_PKTSIZE);
629
630 writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);
631
632
633
634 writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));
635
636 tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
637 writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));
638
639 writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
640 writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));
641
642
643 tasklet_init(&tsin->tsklet, channel_swdemux_tsklet,
644 (unsigned long) tsin);
645
646 return 0;
647
648err_unmap:
649 free_input_block(fei, tsin);
650 return ret;
651}
652
653static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
654{
655 struct c8sectpfei *fei = priv;
656
657 dev_err(fei->dev, "%s: error handling not yet implemented\n"
658 , __func__);
659
660
661
662
663
664
665 return IRQ_HANDLED;
666}
667
668static int c8sectpfe_probe(struct platform_device *pdev)
669{
670 struct device *dev = &pdev->dev;
671 struct device_node *child, *np = dev->of_node;
672 struct c8sectpfei *fei;
673 struct resource *res;
674 int ret, index = 0;
675 struct channel_info *tsin;
676
677
678 fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
679 if (!fei)
680 return -ENOMEM;
681
682 fei->dev = dev;
683
684 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
685 fei->io = devm_ioremap_resource(dev, res);
686 if (IS_ERR(fei->io))
687 return PTR_ERR(fei->io);
688
689 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
690 "c8sectpfe-ram");
691 fei->sram = devm_ioremap_resource(dev, res);
692 if (IS_ERR(fei->sram))
693 return PTR_ERR(fei->sram);
694
	fei->sram_size = resource_size(res);
696
697 fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
698 if (fei->idle_irq < 0) {
699 dev_err(dev, "Can't get c8sectpfe-idle-irq\n");
700 return fei->idle_irq;
701 }
702
703 fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
704 if (fei->error_irq < 0) {
705 dev_err(dev, "Can't get c8sectpfe-error-irq\n");
706 return fei->error_irq;
707 }
708
709 platform_set_drvdata(pdev, fei);
710
711 fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
712 if (IS_ERR(fei->c8sectpfeclk)) {
713 dev_err(dev, "c8sectpfe clk not found\n");
714 return PTR_ERR(fei->c8sectpfeclk);
715 }
716
717 ret = clk_prepare_enable(fei->c8sectpfeclk);
718 if (ret) {
719 dev_err(dev, "Failed to enable c8sectpfe clock\n");
720 return ret;
721 }
722
723
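	/* Gate all tsin clocks for now and enable the MEMDMA clock */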
724 writel(0, fei->io + SYS_INPUT_CLKEN);
725
726
727 writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);
728
729
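	/* Clear the c8sectpfe internal RAM */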
730 memset_io(fei->sram, 0x0, fei->sram_size);
731
732 c8sectpfe_getconfig(fei);
733
734 ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
735 0, "c8sectpfe-idle-irq", fei);
736 if (ret) {
737 dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
738 goto err_clk_disable;
739 }
740
741 ret = devm_request_irq(dev, fei->error_irq,
742 c8sectpfe_error_irq_handler, 0,
743 "c8sectpfe-error-irq", fei);
744 if (ret) {
745 dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
746 goto err_clk_disable;
747 }
748
749 fei->tsin_count = of_get_child_count(np);
750
751 if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
752 fei->tsin_count > fei->hw_stats.num_ib) {
753
754 dev_err(dev, "More tsin declared than exist on SoC!\n");
755 ret = -EINVAL;
756 goto err_clk_disable;
757 }
758
759 fei->pinctrl = devm_pinctrl_get(dev);
760
761 if (IS_ERR(fei->pinctrl)) {
762 dev_err(dev, "Error getting tsin pins\n");
763 ret = PTR_ERR(fei->pinctrl);
764 goto err_clk_disable;
765 }
766
767 for_each_child_of_node(np, child) {
768 struct device_node *i2c_bus;
769
770 fei->channel_data[index] = devm_kzalloc(dev,
771 sizeof(struct channel_info),
772 GFP_KERNEL);
773
774 if (!fei->channel_data[index]) {
775 ret = -ENOMEM;
776 goto err_clk_disable;
777 }
778
779 tsin = fei->channel_data[index];
780
781 tsin->fei = fei;
782
783 ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
784 if (ret) {
785 dev_err(&pdev->dev, "No tsin_num found\n");
786 goto err_clk_disable;
787 }
788
789
790 if (tsin->tsin_id > fei->hw_stats.num_ib) {
791 dev_err(&pdev->dev,
792 "tsin-num %d specified greater than number\n\t"
793 "of input block hw in SoC! (%d)",
794 tsin->tsin_id, fei->hw_stats.num_ib);
795 ret = -EINVAL;
796 goto err_clk_disable;
797 }
798
799 tsin->invert_ts_clk = of_property_read_bool(child,
800 "invert-ts-clk");
801
802 tsin->serial_not_parallel = of_property_read_bool(child,
803 "serial-not-parallel");
804
805 tsin->async_not_sync = of_property_read_bool(child,
806 "async-not-sync");
807
808 ret = of_property_read_u32(child, "dvb-card",
809 &tsin->dvb_card);
810 if (ret) {
811 dev_err(&pdev->dev, "No dvb-card found\n");
812 goto err_clk_disable;
813 }
814
		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
		if (!i2c_bus) {
			dev_err(&pdev->dev, "No i2c-bus found\n");
			ret = -ENODEV;
			goto err_clk_disable;
		}
		tsin->i2c_adapter =
			of_find_i2c_adapter_by_node(i2c_bus);
		if (!tsin->i2c_adapter) {
			dev_err(&pdev->dev, "No i2c adapter found\n");
			of_node_put(i2c_bus);
			ret = -ENODEV;
			goto err_clk_disable;
		}
		of_node_put(i2c_bus);
828
829 tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);
830
		ret = gpio_is_valid(tsin->rst_gpio);
		if (!ret) {
			dev_err(dev,
				"reset gpio for tsin%d not valid (gpio=%d)\n",
				tsin->tsin_id, tsin->rst_gpio);
			ret = -EINVAL;
			goto err_clk_disable;
		}
838
839 ret = devm_gpio_request_one(dev, tsin->rst_gpio,
840 GPIOF_OUT_INIT_LOW, "NIM reset");
841 if (ret && ret != -EBUSY) {
842 dev_err(dev, "Can't request tsin%d reset gpio\n"
843 , fei->channel_data[index]->tsin_id);
844 goto err_clk_disable;
845 }
846
847 if (!ret) {
848
849 gpio_direction_output(tsin->rst_gpio, 0);
850 usleep_range(3500, 5000);
851 gpio_direction_output(tsin->rst_gpio, 1);
852 usleep_range(3000, 5000);
853 }
854
855 tsin->demux_mapping = index;
856
		dev_dbg(fei->dev,
			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\t"
			"serial-not-parallel=%d async-not-sync=%d dvb-card=%d\n",
			fei->channel_data[index], index,
			tsin->tsin_id, tsin->invert_ts_clk,
			tsin->serial_not_parallel, tsin->async_not_sync,
			tsin->dvb_card);
864
865 index++;
866 }
867
868
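	/* Set up the polling timer that schedules the per-channel demux tasklets */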
869 init_timer(&fei->timer);
870 fei->timer.function = c8sectpfe_timer_interrupt;
871 fei->timer.data = (unsigned long)fei;
872
873 mutex_init(&fei->lock);
874
875
876 ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
877 (void *)fei,
878 c8sectpfe_start_feed,
879 c8sectpfe_stop_feed);
880 if (ret) {
881 dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
882 ret);
883 goto err_clk_disable;
884 }
885
886 c8sectpfe_debugfs_init(fei);
887
888 return 0;
889
err_clk_disable:
	/* TODO: disable the clock here once the clock framework holds a reference on it */
	/* clk_disable_unprepare(fei->c8sectpfeclk); */
	return ret;
894}
895
896static int c8sectpfe_remove(struct platform_device *pdev)
897{
898 struct c8sectpfei *fei = platform_get_drvdata(pdev);
899 struct channel_info *channel;
900 int i;
901
902 wait_for_completion(&fei->fw_ack);
903
904 c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);
905
906
907
908
909 for (i = 0; i < fei->tsin_count; i++) {
910 channel = fei->channel_data[i];
911 free_input_block(fei, channel);
912 }
913
914 c8sectpfe_debugfs_exit(fei);
915
916 dev_info(fei->dev, "Stopping memdma SLIM core\n");
917 if (readl(fei->io + DMA_CPU_RUN))
918 writel(0x0, fei->io + DMA_CPU_RUN);
919
920
921 if (readl(fei->io + SYS_INPUT_CLKEN))
922 writel(0, fei->io + SYS_INPUT_CLKEN);
923
924 if (readl(fei->io + SYS_OTHER_CLKEN))
925 writel(0, fei->io + SYS_OTHER_CLKEN);
926
927
928
929
930
931
932
933 return 0;
934}
935
936
937static int configure_channels(struct c8sectpfei *fei)
938{
939 int index = 0, ret;
940 struct channel_info *tsin;
941 struct device_node *child, *np = fei->dev->of_node;
942
943
944 for_each_child_of_node(np, child) {
945
946 tsin = fei->channel_data[index];
947
948 ret = configure_memdma_and_inputblock(fei,
949 fei->channel_data[index]);
950
951 if (ret) {
952 dev_err(fei->dev,
953 "configure_memdma_and_inputblock failed\n");
954 goto err_unmap;
955 }
956 index++;
957 }
958
959 return 0;
960
961err_unmap:
962 for (index = 0; index < fei->tsin_count; index++) {
963 tsin = fei->channel_data[index];
964 free_input_block(fei, tsin);
965 }
966 return ret;
967}
968
969static int
970c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
971{
972 struct elf32_hdr *ehdr;
973 char class;
974
975 if (!fw) {
976 dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
977 return -EINVAL;
978 }
979
980 if (fw->size < sizeof(struct elf32_hdr)) {
981 dev_err(fei->dev, "Image is too small\n");
982 return -EINVAL;
983 }
984
985 ehdr = (struct elf32_hdr *)fw->data;
986
987
988 class = ehdr->e_ident[EI_CLASS];
989 if (class != ELFCLASS32) {
990 dev_err(fei->dev, "Unsupported class: %d\n", class);
991 return -EINVAL;
992 }
993
994 if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
995 dev_err(fei->dev, "Unsupported firmware endianness\n");
996 return -EINVAL;
997 }
998
999 if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
1000 dev_err(fei->dev, "Image is too small\n");
1001 return -EINVAL;
1002 }
1003
1004 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
1005 dev_err(fei->dev, "Image is corrupted (bad magic)\n");
1006 return -EINVAL;
1007 }
1008
1009
1010 ehdr = (Elf32_Ehdr *)fw->data;
1011 if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
1012 ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
1013 ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
1014 ehdr->e_ident[EI_MAG3] != ELFMAG3) {
1015 dev_err(fei->dev, "Invalid ELF magic\n");
1016 return -EINVAL;
1017 }
1018
1019 if (ehdr->e_type != ET_EXEC) {
1020 dev_err(fei->dev, "Unsupported ELF header type\n");
1021 return -EINVAL;
1022 }
1023
1024 if (ehdr->e_phoff > fw->size) {
1025 dev_err(fei->dev, "Firmware size is too small\n");
1026 return -EINVAL;
1027 }
1028
1029 return 0;
1030}
1031
1032
1033static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
1034 const struct firmware *fw, u8 __iomem *dest,
1035 int seg_num)
1036{
1037 const u8 *imem_src = fw->data + phdr->p_offset;
1038 int i;
1039
1040
1041
1042
1043
1044
1045
1046
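	/*
	 * IMEM segments contain 24-bit instructions which are padded to
	 * 32 bits with a zero byte as they are written, so the segment
	 * grows by a third when loaded.
	 */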
1047 dev_dbg(fei->dev,
1048 "Loading IMEM segment %d 0x%08x\n\t"
1049 " (0x%x bytes) -> 0x%p (0x%x bytes)\n", seg_num,
1050 phdr->p_paddr, phdr->p_filesz,
1051 dest, phdr->p_memsz + phdr->p_memsz / 3);
1052
	for (i = 0; i < phdr->p_filesz; i++) {

		writeb(*imem_src, dest);

		/*
		 * Every third byte, insert a padding zero so the 24-bit
		 * instructions are expanded to 32 bits in IMEM.
		 */
		if (i % 3 == 2) {
			dest++;
			writeb(0x00, dest);
		}

		dest++;
		imem_src++;
	}
1067}
1068
1069static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
1070 const struct firmware *fw, u8 __iomem *dst, int seg_num)
1071{
1072
1073
1074
1075
1076
1077 dev_dbg(fei->dev,
1078 "Loading DMEM segment %d 0x%08x\n\t"
1079 "(0x%x bytes) -> 0x%p (0x%x bytes)\n",
1080 seg_num, phdr->p_paddr, phdr->p_filesz,
1081 dst, phdr->p_memsz);
1082
1083 memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
1084 phdr->p_filesz);
1085
1086 memset((void __force *)dst + phdr->p_filesz, 0,
1087 phdr->p_memsz - phdr->p_filesz);
1088}
1089
1090static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
1091{
1092 Elf32_Ehdr *ehdr;
1093 Elf32_Phdr *phdr;
1094 u8 __iomem *dst;
1095 int err = 0, i;
1096
1097 if (!fw || !fei)
1098 return -EINVAL;
1099
1100 ehdr = (Elf32_Ehdr *)fw->data;
1101 phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
1102
1103
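	/* Walk the ELF program headers and load each PT_LOAD segment */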
1104 for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1105
1106
1107 if (phdr->p_type != PT_LOAD)
1108 continue;
1109
1110
1111
1112
1113 if (phdr->p_offset + phdr->p_filesz > fw->size) {
1114 dev_err(fei->dev,
1115 "Segment %d is outside of firmware file\n", i);
1116 err = -EINVAL;
1117 break;
1118 }
1119
1120
1121
1122
1123
1124
1125
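		/* Executable segments are loaded into IMEM, all others into DMEM */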
1126 if (phdr->p_flags & PF_X) {
1127 dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
1128
1129
1130
1131
1132 dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
1133 load_imem_segment(fei, phdr, fw, dst, i);
1134 } else {
1135 dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
1136
1137
1138
1139
1140 dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
1141 load_dmem_segment(fei, phdr, fw, dst, i);
1142 }
1143 }
1144
1145 release_firmware(fw);
1146 return err;
1147}
1148
1149static int load_c8sectpfe_fw(struct c8sectpfei *fei)
1150{
1151 const struct firmware *fw;
1152 int err;
1153
1154 dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
1155
1156 err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
1157 if (err)
1158 return err;
1159
1160 err = c8sectpfe_elf_sanity_check(fei, fw);
1161 if (err) {
1162 dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
1163 , err);
1164 release_firmware(fw);
1165 return err;
1166 }
1167
1168 err = load_slim_core_fw(fw, fei);
1169 if (err) {
1170 dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
1171 return err;
1172 }
1173
1174
1175 err = configure_channels(fei);
1176 if (err) {
1177 dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
1178 return err;
1179 }
1180
1181
1182
1183
1184
1185 writel(0x1, fei->io + DMA_PER_STBUS_SYNC);
1186
1187 dev_info(fei->dev, "Boot the memdma SLIM core\n");
1188 writel(0x1, fei->io + DMA_CPU_RUN);
1189
1190 atomic_set(&fei->fw_loaded, 1);
1191
1192 return 0;
1193}
1194
1195static const struct of_device_id c8sectpfe_match[] = {
1196 { .compatible = "st,stih407-c8sectpfe" },
1197 { },
1198};
1199MODULE_DEVICE_TABLE(of, c8sectpfe_match);
1200
1201static struct platform_driver c8sectpfe_driver = {
1202 .driver = {
1203 .name = "c8sectpfe",
1204 .of_match_table = of_match_ptr(c8sectpfe_match),
1205 },
1206 .probe = c8sectpfe_probe,
1207 .remove = c8sectpfe_remove,
1208};
1209
1210module_platform_driver(c8sectpfe_driver);
1211
1212MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
1213MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
1214MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
1215MODULE_LICENSE("GPL");
1216