// SPDX-License-Identifier: GPL-2.0
/*
 * c8sectpfe-core.c - C8SECTPFE STi DVB driver
 *
 * Copyright (c) STMicroelectronics
 *
 * Authors: Peter Bennett <peter.bennett@st.com>
 *          Peter Griffin <peter.griffin@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dvb/dmx.h>
#include <linux/dvb/frontend.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/version.h>
#include <linux/wait.h>
#include <linux/pinctrl/pinctrl.h>

#include "c8sectpfe-core.h"
#include "c8sectpfe-common.h"
#include "c8sectpfe-debugfs.h"
#include <media/dmxdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
#include <media/dvb_net.h>

#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
MODULE_FIRMWARE(FIRMWARE_MEMDMA);

#define PID_TABLE_SIZE 1024
#define POLL_MSECS 50

static int load_c8sectpfe_fw(struct c8sectpfei *fei);

#define TS_PKT_SIZE 188
#define HEADER_SIZE (4)
#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)

#define FEI_ALIGNMENT (32)

#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)

#define FIFO_LEN 1024

static void c8sectpfe_timer_interrupt(struct timer_list *t)
{
	struct c8sectpfei *fei = from_timer(fei, t, timer);
	struct channel_info *channel;
	int chan_num;

	/* iterate through the input block channels */
	for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
		channel = fei->channel_data[chan_num];

		/* is this descriptor initialised and the TP enabled? */
		if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
			tasklet_schedule(&channel->tsklet);
	}

	fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
	add_timer(&fei->timer);
}

static void channel_swdemux_tsklet(struct tasklet_struct *t)
{
	struct channel_info *channel = from_tasklet(channel, t, tsklet);
	struct c8sectpfei *fei;
	unsigned long wp, rp;
	int pos, num_packets, n, size;
	u8 *buf;

	if (unlikely(!channel || !channel->irec))
		return;

	fei = channel->fei;

	wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));

	pos = rp - channel->back_buffer_busaddr;

	/* has the write pointer wrapped? */
	if (wp < rp)
		wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;

	size = wp - rp;
	num_packets = size / PACKET_SIZE;

	/* manage cache so data is visible to the CPU */
	dma_sync_single_for_cpu(fei->dev, rp, size, DMA_FROM_DEVICE);

	buf = (u8 *) channel->back_buffer_aligned;

	dev_dbg(fei->dev,
		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);

	for (n = 0; n < num_packets; n++) {
		dvb_dmx_swfilter_packets(
			&fei->c8sectpfe[0]->demux[channel->demux_mapping].dvb_demux,
			&buf[pos], 1);

		pos += PACKET_SIZE;
	}

	/* advance the read pointer, wrapping back to the buffer start */
	if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSRP_TP(0));
	else
		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
}

static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	u32 tmp;
	unsigned long *bitmap;
	int ret;

	switch (dvbdmxfeed->type) {
	case DMX_TYPE_TS:
		break;
	case DMX_TYPE_SEC:
		break;
	default:
		dev_err(fei->dev, "%s:%d Error bailing\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	if (dvbdmxfeed->type == DMX_TYPE_TS) {
		switch (dvbdmxfeed->pes_type) {
		case DMX_PES_VIDEO:
		case DMX_PES_AUDIO:
		case DMX_PES_TELETEXT:
		case DMX_PES_PCR:
		case DMX_PES_OTHER:
			break;
		default:
			dev_err(fei->dev, "%s:%d Error bailing\n",
				__func__, __LINE__);
			return -EINVAL;
		}
	}

	if (!atomic_read(&fei->fw_loaded)) {
		ret = load_c8sectpfe_fw(fei);
		if (ret)
			return ret;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = (unsigned long *) channel->pid_buffer_aligned;

	/* 8192 is the special wildcard PID meaning "pass the whole TS" */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp &= ~C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_set(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so the PID bitmap is visible to the HW */
	dma_sync_single_for_device(fei->dev,
				channel->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	channel->active = 1;

	if (fei->global_feed_count == 0) {
		fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);

		add_timer(&fei->timer);
	}

	if (stdemux->running_feed_count == 0) {

		dev_dbg(fei->dev, "Starting channel=%p\n", channel);

		tasklet_setup(&channel->tsklet, channel_swdemux_tsklet);

		/* Reset the internal input block sram pointers */
		writel(channel->fifo,
			fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
		writel(channel->fifo + FIFO_LEN - 1,
			fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));

		writel(channel->fifo,
			fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
		writel(channel->fifo,
			fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));

		/* reset the read / write memdma pointers for this channel */
		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSWP_TP(0));

		/* issue a reset and enable the input block */
		writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET,
			fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* and enable the transport processor descriptor */
		writel(0x1, channel->irec + DMA_PRDS_TPENABLE);

		dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n",
			__func__, __LINE__, stdemux);
	}

	stdemux->running_feed_count++;
	fei->global_feed_count++;

	mutex_unlock(&fei->lock);

	return 0;
}

static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	if (!atomic_read(&fei->fw_loaded)) {
		ret = load_c8sectpfe_fw(fei);
		if (ret)
			return ret;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = (unsigned long *) channel->pid_buffer_aligned;

	/* 8192 is the special wildcard PID: re-enable PID filtering */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so the PID bitmap is visible to the HW */
	dma_sync_single_for_device(fei->dev,
				channel->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* disable the input block (stops TS data reaching memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channel's descriptor */
		writel(0, channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request that the memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;
		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for the idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						msecs_to_jiffies(100));
		if (ret == 0)
			dev_warn(fei->dev,
				"Timeout waiting for idle irq on tsin%d\n",
				channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset the read / write pointers for this channel */
		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* clear the PID filter table for this channel */
		memset((void *)channel->pid_buffer_aligned, 0x00,
			PID_TABLE_SIZE);

		/* manage cache so the cleared bitmap is visible to the HW */
		dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

		channel->active = 0;
	}

	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n",
			__func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}

static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
{
	int i;

	for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
		if (!fei->channel_data[i])
			continue;

		if (fei->channel_data[i]->tsin_id == tsin_num)
			return fei->channel_data[i];
	}

	return NULL;
}

static void c8sectpfe_getconfig(struct c8sectpfei *fei)
{
	struct c8sectpfe_hw *hw = &fei->hw_stats;

	hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
	hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
	hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
	hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
	hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
	hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
	hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);

	dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
	dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
	dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
	dev_info(fei->dev, "Software Transport Stream Inputs: %d\n",
		 hw->num_swts);
	dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
	dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
	dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
	dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n",
		 hw->num_tp);
}

static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
{
	struct c8sectpfei *fei = priv;
	struct channel_info *chan;
	int bit;
	unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);

	/*
	 * Signal idle completion for each channel whose idle request
	 * bit is set, then acknowledge by clearing DMA_IDLE_REQ.
	 */
	for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {

		chan = find_channel(fei, bit);

		if (chan)
			complete(&chan->idle_completion);
	}

	writel(0, fei->io + DMA_IDLE_REQ);

	return IRQ_HANDLED;
}

/* Free the DMA mappings and buffers allocated for a tsin channel */
static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
{
	if (!fei || !tsin)
		return;

	if (tsin->back_buffer_busaddr)
		if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
			dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
				FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);

	kfree(tsin->back_buffer_start);

	if (tsin->pid_buffer_busaddr)
		if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
			dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE, DMA_BIDIRECTIONAL);

	kfree(tsin->pid_buffer_start);
}

#define MAX_NAME 20

static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
				struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n",
		__func__, __LINE__, tsin, tsin->tsin_id);

	init_completion(&tsin->idle_completion);

	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE + FEI_ALIGNMENT,
					GFP_KERNEL);
	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure the back buffer is 32-byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start + FEI_ALIGNMENT;

	tsin->back_buffer_aligned = (void *)
		(((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
					(void *)tsin->back_buffer_aligned,
					FEI_BUFFER_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The PID filter table is a bitmap covering 8192 PIDs, i.e.
	 * PID_TABLE_SIZE (1024) bytes. Allocate twice that so the
	 * table can be aligned to its own size below.
	 */
	tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);
	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Align the PID bitmap to PID_TABLE_SIZE (1024 bytes) */
	tsin->pid_buffer_aligned = tsin->pid_buffer_start + PID_TABLE_SIZE;

	tsin->pid_buffer_aligned = (void *)
		(((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						tsin->pid_buffer_aligned,
						PID_TABLE_SIZE,
						DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so the PID bitmap is visible to the HW */
	dma_sync_single_for_device(fei->dev,
				tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		(tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n",
			__func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);
	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n",
			__func__);
		goto err_unmap;
	}

	/* Enable this input block's clock */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	writel(C8SECTPFE_SYNC(0x9) |
		C8SECTPFE_DROP(0x9) |
		C8SECTPFE_TOKEN(0x47),
		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place each input block FIFO at a fixed offset in internal sram */
	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	writel(tsin->pid_buffer_busaddr,
		fei->io + PIDF_BASE(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		&tsin->pid_buffer_busaddr);

	/*
	 * Configure and enable HW PID filtering: the PID is extracted
	 * as a 13-bit field starting at bit offset 40 of the packet
	 * as seen by the input block.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
		| C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get the base address of the pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
			readl(fei->io + DMA_PTRREC_BASE);

	/* advance the pointer record block to this channel's entry */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);

	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* packet size rounded up to the next 8-byte boundary */
	writel((TS_PKT_SIZE + 7) & ~7, tsin->irec + DMA_PRDS_PKTSIZE);

	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* initialise the bus base/top and read/write pointers */
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));

	/* initialise the software demux tasklet for this channel */
	tasklet_setup(&tsin->tsklet, channel_swdemux_tsklet);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}

static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
{
	struct c8sectpfei *fei = priv;

	dev_err(fei->dev, "%s: error handling not yet implemented\n",
		__func__);

	/*
	 * TODO FIXME: we should detect some error conditions here
	 * and ideally do something about them!
	 */

	return IRQ_HANDLED;
}

static int c8sectpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;
	struct c8sectpfei *fei;
	struct resource *res;
	int ret, index = 0;
	struct channel_info *tsin;

	/* Allocate the c8sectpfei structure */
	fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
	if (!fei)
		return -ENOMEM;

	fei->dev = dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
	fei->io = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->io))
		return PTR_ERR(fei->io);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					"c8sectpfe-ram");
	fei->sram = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->sram))
		return PTR_ERR(fei->sram);

	fei->sram_size = resource_size(res);

	fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
	if (fei->idle_irq < 0)
		return fei->idle_irq;

	fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
	if (fei->error_irq < 0)
		return fei->error_irq;

	platform_set_drvdata(pdev, fei);

	fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
	if (IS_ERR(fei->c8sectpfeclk)) {
		dev_err(dev, "c8sectpfe clk not found\n");
		return PTR_ERR(fei->c8sectpfeclk);
	}

	ret = clk_prepare_enable(fei->c8sectpfeclk);
	if (ret) {
		dev_err(dev, "Failed to enable c8sectpfe clock\n");
		return ret;
	}

	/* leave all tsin input clocks disabled until a channel is configured */
	writel(0, fei->io + SYS_INPUT_CLKEN);

	/* enable the memdma clock */
	writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);

	/* clear the internal sram */
	memset_io(fei->sram, 0x0, fei->sram_size);

	c8sectpfe_getconfig(fei);

	ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
			0, "c8sectpfe-idle-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
		goto err_clk_disable;
	}

	ret = devm_request_irq(dev, fei->error_irq,
				c8sectpfe_error_irq_handler, 0,
				"c8sectpfe-error-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
		goto err_clk_disable;
	}

	fei->tsin_count = of_get_child_count(np);

	if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
	    fei->tsin_count > fei->hw_stats.num_ib) {
		dev_err(dev, "More tsin declared than exist on SoC!\n");
		ret = -EINVAL;
		goto err_clk_disable;
	}

	fei->pinctrl = devm_pinctrl_get(dev);
	if (IS_ERR(fei->pinctrl)) {
		dev_err(dev, "Error getting tsin pins\n");
		ret = PTR_ERR(fei->pinctrl);
		goto err_clk_disable;
	}

	for_each_child_of_node(np, child) {
		struct device_node *i2c_bus;

		fei->channel_data[index] = devm_kzalloc(dev,
						sizeof(struct channel_info),
						GFP_KERNEL);
		if (!fei->channel_data[index]) {
			ret = -ENOMEM;
			goto err_node_put;
		}

		tsin = fei->channel_data[index];

		tsin->fei = fei;

		ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
		if (ret) {
			dev_err(&pdev->dev, "No tsin_num found\n");
			goto err_node_put;
		}

		/* sanity check the value */
		if (tsin->tsin_id > fei->hw_stats.num_ib) {
			dev_err(&pdev->dev,
				"tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
				tsin->tsin_id, fei->hw_stats.num_ib);
			ret = -EINVAL;
			goto err_node_put;
		}

		tsin->invert_ts_clk = of_property_read_bool(child,
							"invert-ts-clk");

		tsin->serial_not_parallel = of_property_read_bool(child,
							"serial-not-parallel");

		tsin->async_not_sync = of_property_read_bool(child,
							"async-not-sync");

		ret = of_property_read_u32(child, "dvb-card",
					&tsin->dvb_card);
		if (ret) {
			dev_err(&pdev->dev, "No dvb-card found\n");
			goto err_node_put;
		}

		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
		if (!i2c_bus) {
			dev_err(&pdev->dev, "No i2c-bus found\n");
			ret = -ENODEV;
			goto err_node_put;
		}
		tsin->i2c_adapter = of_find_i2c_adapter_by_node(i2c_bus);
		if (!tsin->i2c_adapter) {
			dev_err(&pdev->dev, "No i2c adapter found\n");
			of_node_put(i2c_bus);
			ret = -ENODEV;
			goto err_node_put;
		}
		of_node_put(i2c_bus);

		tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);

		ret = gpio_is_valid(tsin->rst_gpio);
		if (!ret) {
			dev_err(dev,
				"reset gpio for tsin%d not valid (gpio=%d)\n",
				tsin->tsin_id, tsin->rst_gpio);
			ret = -EINVAL;
			goto err_node_put;
		}

		ret = devm_gpio_request_one(dev, tsin->rst_gpio,
					GPIOF_OUT_INIT_LOW, "NIM reset");
		if (ret && ret != -EBUSY) {
			dev_err(dev, "Can't request tsin%d reset gpio\n",
				fei->channel_data[index]->tsin_id);
			goto err_node_put;
		}

		if (!ret) {
			/* toggle the reset line */
			gpio_direction_output(tsin->rst_gpio, 0);
			usleep_range(3500, 5000);
			gpio_direction_output(tsin->rst_gpio, 1);
			usleep_range(3000, 5000);
		}

		tsin->demux_mapping = index;

		dev_dbg(fei->dev,
			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d async-not-sync=%d dvb-card=%d\n",
			fei->channel_data[index], index,
			tsin->tsin_id, tsin->invert_ts_clk,
			tsin->serial_not_parallel, tsin->async_not_sync,
			tsin->dvb_card);

		index++;
	}

	/* Set up the polling timer */
	timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);

	mutex_init(&fei->lock);

	/* Register the DVB frontends/tuners for each configured tsin */
	ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
					(void *)fei,
					c8sectpfe_start_feed,
					c8sectpfe_stop_feed);
	if (ret) {
		dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
			ret);
		goto err_clk_disable;
	}

	c8sectpfe_debugfs_init(fei);

	return 0;

err_node_put:
	of_node_put(child);
err_clk_disable:
	clk_disable_unprepare(fei->c8sectpfeclk);
	return ret;
}

static int c8sectpfe_remove(struct platform_device *pdev)
{
	struct c8sectpfei *fei = platform_get_drvdata(pdev);
	struct channel_info *channel;
	int i;

	wait_for_completion(&fei->fw_ack);

	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);

	/*
	 * Now loop through and un-configure each of the input block
	 * resources.
	 */
	for (i = 0; i < fei->tsin_count; i++) {
		channel = fei->channel_data[i];
		free_input_block(fei, channel);
	}

	c8sectpfe_debugfs_exit(fei);

	dev_info(fei->dev, "Stopping memdma SLIM core\n");
	if (readl(fei->io + DMA_CPU_RUN))
		writel(0x0, fei->io + DMA_CPU_RUN);

	/* gate the clocks of all internal IPs */
	if (readl(fei->io + SYS_INPUT_CLKEN))
		writel(0, fei->io + SYS_INPUT_CLKEN);

	if (readl(fei->io + SYS_OTHER_CLKEN))
		writel(0, fei->io + SYS_OTHER_CLKEN);

	if (fei->c8sectpfeclk)
		clk_disable_unprepare(fei->c8sectpfeclk);

	return 0;
}

static int configure_channels(struct c8sectpfei *fei)
{
	int index = 0, ret;
	struct channel_info *tsin;
	struct device_node *child, *np = fei->dev->of_node;

	/* iterate round each tsin, configuring the memdma and IB hw */
	for_each_child_of_node(np, child) {

		tsin = fei->channel_data[index];

		ret = configure_memdma_and_inputblock(fei,
						fei->channel_data[index]);
		if (ret) {
			dev_err(fei->dev,
				"configure_memdma_and_inputblock failed\n");
			of_node_put(child);
			goto err_unmap;
		}
		index++;
	}

	return 0;

err_unmap:
	for (index = 0; index < fei->tsin_count; index++) {
		tsin = fei->channel_data[index];
		free_input_block(fei, tsin);
	}
	return ret;
}

static int
c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
{
	struct elf32_hdr *ehdr;
	char class;

	if (!fw) {
		dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
		return -EINVAL;
	}

	if (fw->size < sizeof(struct elf32_hdr)) {
		dev_err(fei->dev, "Image is too small\n");
		return -EINVAL;
	}

	ehdr = (struct elf32_hdr *)fw->data;

	/* We only support ELF32 at this point */
	class = ehdr->e_ident[EI_CLASS];
	if (class != ELFCLASS32) {
		dev_err(fei->dev, "Unsupported class: %d\n", class);
		return -EINVAL;
	}

	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
		dev_err(fei->dev, "Unsupported firmware endianness\n");
		return -EINVAL;
	}

	if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
		dev_err(fei->dev, "Image is too small\n");
		return -EINVAL;
	}

	/* Check the ELF magic */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		dev_err(fei->dev, "Image is corrupted (bad magic)\n");
		return -EINVAL;
	}

	if (ehdr->e_type != ET_EXEC) {
		dev_err(fei->dev, "Unsupported ELF header type\n");
		return -EINVAL;
	}

	if (ehdr->e_phoff > fw->size) {
		dev_err(fei->dev, "Firmware size is too small\n");
		return -EINVAL;
	}

	return 0;
}

static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dest,
			int seg_num)
{
	const u8 *imem_src = fw->data + phdr->p_offset;
	int i;

	/*
	 * IMEM segments are written with each 3 source bytes expanded
	 * to 4 destination bytes by inserting a zero padding byte,
	 * i.e. 24-bit words are widened to 32 bits.
	 */
	dev_dbg(fei->dev,
		"Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, phdr->p_filesz, dest,
		phdr->p_memsz + phdr->p_memsz / 3);

	for (i = 0; i < phdr->p_filesz; i++) {

		writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);

		/* after every 3 bytes, write an extra zero padding byte */
		if (i % 3 == 2) {
			dest++;
			writeb(0x00, (void __iomem *)dest);
		}

		dest++;
		imem_src++;
	}
}

static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dst, int seg_num)
{
	/*
	 * For DMEM segments, copy the segment data from the ELF file
	 * and pad the remainder of the segment with zeroes.
	 */
	dev_dbg(fei->dev,
		"Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, phdr->p_filesz,
		dst, phdr->p_memsz);

	memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
		phdr->p_filesz);

	memset((void __force *)dst + phdr->p_filesz, 0,
		phdr->p_memsz - phdr->p_filesz);
}

static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
{
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	u8 __iomem *dst;
	int err = 0, i;

	if (!fw || !fei)
		return -EINVAL;

	ehdr = (Elf32_Ehdr *)fw->data;
	phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {

		/* Only consider LOAD segments */
		if (phdr->p_type != PT_LOAD)
			continue;

		/* Check the segment is contained within the firmware image */
		if (phdr->p_offset + phdr->p_filesz > fw->size) {
			dev_err(fei->dev,
				"Segment %d is outside of firmware file\n", i);
			err = -EINVAL;
			break;
		}

		/*
		 * A segment with the executable flag set is loaded into
		 * MEMDMA IMEM; otherwise it is loaded into DMEM.
		 */
		if (phdr->p_flags & PF_X) {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
			/*
			 * The SLIM ELF file uses 32-bit word addressing
			 * for load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_imem_segment(fei, phdr, fw, dst, i);
		} else {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
			/*
			 * The SLIM ELF file uses 32-bit word addressing
			 * for load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_dmem_segment(fei, phdr, fw, dst, i);
		}
	}

	release_firmware(fw);
	return err;
}

static int load_c8sectpfe_fw(struct c8sectpfei *fei)
{
	const struct firmware *fw;
	int err;

	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);

	err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
	if (err)
		return err;

	err = c8sectpfe_elf_sanity_check(fei, fw);
	if (err) {
		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n",
			err);
		release_firmware(fw);
		return err;
	}

	err = load_slim_core_fw(fw, fei);
	if (err) {
		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
		return err;
	}

	/* now that the firmware is loaded, configure the input blocks */
	err = configure_channels(fei);
	if (err) {
		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
		return err;
	}

	/*
	 * Allow the STBus target port to access the IMEM and DMEM ports
	 * without waiting for the CPU.
	 */
	writel(0x1, fei->io + DMA_PER_STBUS_SYNC);

	dev_info(fei->dev, "Boot the memdma SLIM core\n");
	writel(0x1, fei->io + DMA_CPU_RUN);

	atomic_set(&fei->fw_loaded, 1);

	return 0;
}

static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);

static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = of_match_ptr(c8sectpfe_match),
	},
	.probe = c8sectpfe_probe,
	.remove = c8sectpfe_remove,
};

module_platform_driver(c8sectpfe_driver);

MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
MODULE_LICENSE("GPL");