// SPDX-License-Identifier: GPL-2.0
/*
 * dim2.c - MediaLB DIM2 Hardware Dependent Module
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/printk.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/most.h>
#include "hal.h"
#include "errors.h"
#include "sysfs.h"

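/*
 * Number of DMA channels handled by this module.  Channel index i is mapped
 * to MediaLB channel address i * 2 + 2 in configure_channel(), giving
 * addresses 2..62.  (The "32 - 1" presumably reserves one of the 32 hardware
 * channel slots; the HAL does not document which.)
 */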
#define DMA_CHANNELS (32 - 1)

#define MAX_BUFFERS_PACKET 32
#define MAX_BUFFERS_STREAMING 32
#define MAX_BUF_SIZE_PACKET 2048
#define MAX_BUF_SIZE_STREAMING (8 * 1024)

/*
 * The parameter representing the number of frames per sub-buffer for
 * synchronous channels.  Valid values: [0 .. 6].
 *
 * The values 0, 1, 2, 3, 4, 5, 6 represent the corresponding number of
 * frames per sub-buffer 1, 2, 4, 8, 16, 32, 64.
 */
static u8 fcnt = 4;
module_param(fcnt, byte, 0000);
MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a power of 2");
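/*
 * For example, loading the module with "fcnt=3" selects 2^3 = 8 frames per
 * sub-buffer for synchronous channels.
 */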

static DEFINE_SPINLOCK(dim_lock);

static void dim2_tasklet_fn(unsigned long data);
static DECLARE_TASKLET_OLD(dim2_tasklet, dim2_tasklet_fn);

/**
 * struct hdm_channel - private structure to keep channel specific data
 * @name: channel name
 * @is_initialized: identifier to know whether the channel is initialized
 * @ch: HAL specific channel data
 * @reset_dbr_size: reset DBR data buffer size
 * @pending_list: list to keep MBO's before starting transfer
 * @started_list: list to keep MBO's after starting transfer
 * @direction: channel direction (TX or RX)
 * @data_type: channel data type
 */
struct hdm_channel {
	char name[sizeof "caNNN"];
	bool is_initialized;
	struct dim_channel ch;
	u16 *reset_dbr_size;
	struct list_head pending_list;
	struct list_head started_list;
	enum most_channel_direction direction;
	enum most_channel_data_type data_type;
};

/**
 * struct dim2_hdm - private structure to keep interface specific data
 * @dev: device registered with mostcore
 * @hch: an array of channel specific data
 * @capabilities: an array of channel capability data
 * @most_iface: most interface structure
 * @name: device name
 * @io_base: I/O register base address
 * @clk_speed: clock speed configured via the device tree
 * @clk: platform clock
 * @clk_pll: PLL clock, used for speeds of 2048fs and above
 * @netinfo_task: thread to deliver network status
 * @netinfo_waitq: waitq for the thread to sleep
 * @deliver_netinfo: to identify whether network status received
 * @mac_addrs: INIC mac address
 * @link_state: network link state
 * @atx_idx: index of async tx channel
 * @bus: medialb bus data
 * @on_netinfo: callback used to deliver network status to mostcore
 * @disable_platform: platform-specific disable hook, called on release
 */
struct dim2_hdm {
	struct device dev;
	struct hdm_channel hch[DMA_CHANNELS];
	struct most_channel_capability capabilities[DMA_CHANNELS];
	struct most_interface most_iface;
	char name[16 + sizeof "dim2-"];
	void __iomem *io_base;
	u8 clk_speed;
	struct clk *clk;
	struct clk *clk_pll;
	struct task_struct *netinfo_task;
	wait_queue_head_t netinfo_waitq;
	int deliver_netinfo;
	unsigned char mac_addrs[6];
	unsigned char link_state;
	int atx_idx;
	struct medialb_bus bus;
	void (*on_netinfo)(struct most_interface *most_iface,
			   unsigned char link_state, unsigned char *addrs);
	void (*disable_platform)(struct platform_device *pdev);
};

struct dim2_platform_data {
	int (*enable)(struct platform_device *pdev);
	void (*disable)(struct platform_device *pdev);
	u8 fcnt;
};

#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)

/* Macro to identify a network status message */
#define PACKET_IS_NET_INFO(p)  \
	(((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
	 ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	bool state;
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	state = dim_get_lock_state();
	spin_unlock_irqrestore(&dim_lock, flags);

	return sysfs_emit(buf, "%s\n", state ? "locked" : "");
}

static DEVICE_ATTR_RO(state);

static struct attribute *dim2_attrs[] = {
	&dev_attr_state.attr,
	NULL,
};

ATTRIBUTE_GROUPS(dim2);
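/*
 * ATTRIBUTE_GROUPS(dim2) generates the dim2_groups array referenced by the
 * .dev_groups field of dim2_driver below, so every probed device exposes a
 * read-only "state" attribute that reads "locked" while MediaLB lock is set.
 */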

/**
 * dimcb_on_error - callback from HAL to report miscommunication between
 * HDM and HAL
 * @error_id: error ID
 * @error_message: error message, some text in a free format
 */
void dimcb_on_error(u8 error_id, const char *error_message)
{
	pr_err("%s: error_id - %d, error_message - %s\n", __func__, error_id,
	       error_message);
}

/**
 * try_start_dim_transfer - try to transfer a buffer on a channel
 * @hdm_ch: channel specific data
 *
 * Transfer a buffer from pending_list if the channel is ready
 */
static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
{
	u16 buf_size;
	struct list_head *head = &hdm_ch->pending_list;
	struct mbo *mbo;
	unsigned long flags;
	struct dim_ch_state_t st;

	BUG_ON(!hdm_ch);
	BUG_ON(!hdm_ch->is_initialized);

	spin_lock_irqsave(&dim_lock, flags);
	if (list_empty(head)) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	if (!dim_get_channel_state(&hdm_ch->ch, &st)->ready) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	mbo = list_first_entry(head, struct mbo, list);
	buf_size = mbo->buffer_length;

	if (dim_dbr_space(&hdm_ch->ch) < buf_size) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	BUG_ON(mbo->bus_address == 0);
	if (!dim_enqueue_buffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);
		mbo->processed_length = 0;
		mbo->status = MBO_E_INVAL;
		mbo->complete(mbo);
		return -EFAULT;
	}

	list_move_tail(head->next, &hdm_ch->started_list);
	spin_unlock_irqrestore(&dim_lock, flags);

	return 0;
}

/**
 * deliver_netinfo_thread - thread to deliver network status to mostcore
 * @data: private data
 *
 * Wait for network status and deliver it to mostcore once it is received
 */
static int deliver_netinfo_thread(void *data)
{
	struct dim2_hdm *dev = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->netinfo_waitq,
					 dev->deliver_netinfo ||
					 kthread_should_stop());

		if (dev->deliver_netinfo) {
			dev->deliver_netinfo--;
			if (dev->on_netinfo) {
				dev->on_netinfo(&dev->most_iface,
						dev->link_state,
						dev->mac_addrs);
			}
		}
	}

	return 0;
}

/**
 * retrieve_netinfo - retrieve network status from received buffer
 * @dev: private data
 * @mbo: received MBO
 *
 * Parse the message in the buffer to get the node address, link state and
 * MAC address, and wake up the thread that delivers this status to mostcore.
 */
static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
{
	u8 *data = mbo->virt_address;

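	/*
	 * Offsets used below, as consumed by this function: bytes 16..17
	 * hold the big-endian node address, byte 18 the network interface
	 * state and bytes 19..24 the MAC address.
	 */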
	pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
	dev->link_state = data[18];
	pr_info("NIState: %d\n", dev->link_state);
	memcpy(dev->mac_addrs, data + 19, 6);
	dev->deliver_netinfo++;
	wake_up_interruptible(&dev->netinfo_waitq);
}

/**
 * service_done_flag - handle completed buffers
 * @dev: private data
 * @ch_idx: channel index
 *
 * Return back the completed buffers to mostcore, using the completion
 * callback
 */
static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
{
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	struct dim_ch_state_t st;
	struct list_head *head;
	struct mbo *mbo;
	int done_buffers;
	unsigned long flags;
	u8 *data;

	BUG_ON(!hdm_ch);
	BUG_ON(!hdm_ch->is_initialized);

	spin_lock_irqsave(&dim_lock, flags);

	done_buffers = dim_get_channel_state(&hdm_ch->ch, &st)->done_buffers;
	if (!done_buffers) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return;
	}

	if (!dim_detach_buffers(&hdm_ch->ch, done_buffers)) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dim_lock, flags);

	head = &hdm_ch->started_list;

	while (done_buffers) {
		spin_lock_irqsave(&dim_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&dim_lock, flags);
			pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
			break;
		}

		mbo = list_first_entry(head, struct mbo, list);
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);

		data = mbo->virt_address;

		if (hdm_ch->data_type == MOST_CH_ASYNC &&
		    hdm_ch->direction == MOST_CH_RX &&
		    PACKET_IS_NET_INFO(data)) {
			retrieve_netinfo(dev, mbo);

			spin_lock_irqsave(&dim_lock, flags);
			list_add_tail(&mbo->list, &hdm_ch->pending_list);
			spin_unlock_irqrestore(&dim_lock, flags);
		} else {
			if (hdm_ch->data_type == MOST_CH_CONTROL ||
			    hdm_ch->data_type == MOST_CH_ASYNC) {
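				/*
				 * The first two bytes of a control/async
				 * packet carry its big-endian length; the
				 * "+ 2" accounts for the length field itself
				 * (an interpretation of the MOST framing,
				 * not spelled out in this file).
				 */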
				u32 const data_size =
					(u32)data[0] * 256 + data[1] + 2;

				mbo->processed_length =
					min_t(u32, data_size,
					      mbo->buffer_length);
			} else {
				mbo->processed_length = mbo->buffer_length;
			}
			mbo->status = MBO_SUCCESS;
			mbo->complete(mbo);
		}

		done_buffers--;
	}
}

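/*
 * get_active_channels - fill @buffer with pointers to the HAL data of all
 * initialized channels and terminate it with NULL, as expected by
 * dim_service_ahb_int_irq().
 */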
static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
						struct dim_channel **buffer)
{
	int idx = 0;
	int ch_idx;

	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
		if (dev->hch[ch_idx].is_initialized)
			buffer[idx++] = &dev->hch[ch_idx].ch;
	}
	buffer[idx++] = NULL;

	return buffer;
}

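/*
 * dim2_mlb_isr - MediaLB interrupt handler
 * @irq: irq number
 * @_dev: private data
 *
 * Service the MLB interrupt in the HAL, then keep feeding the async tx
 * channel, if one is initialized, until it cannot accept further buffers.
 */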
static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
{
	struct dim2_hdm *dev = _dev;
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	dim_service_mlb_int_irq();
	spin_unlock_irqrestore(&dim_lock, flags);

	if (dev->atx_idx >= 0 && dev->hch[dev->atx_idx].is_initialized)
		while (!try_start_dim_transfer(dev->hch + dev->atx_idx))
			continue;

	return IRQ_HANDLED;
}

/**
 * dim2_tasklet_fn - tasklet function
 * @data: private data
 *
 * Service each initialized channel, if needed
 */
static void dim2_tasklet_fn(unsigned long data)
{
	struct dim2_hdm *dev = (struct dim2_hdm *)data;
	unsigned long flags;
	int ch_idx;

	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
		if (!dev->hch[ch_idx].is_initialized)
			continue;

		spin_lock_irqsave(&dim_lock, flags);
		dim_service_channel(&dev->hch[ch_idx].ch);
		spin_unlock_irqrestore(&dim_lock, flags);

		service_done_flag(dev, ch_idx);
		while (!try_start_dim_transfer(dev->hch + ch_idx))
			continue;
	}
}

/**
 * dim2_ahb_isr - interrupt service routine
 * @irq: irq number
 * @_dev: private data
 *
 * Acknowledge the interrupt in the HAL and schedule the tasklet that
 * services the channels.
 */
static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
{
	struct dim2_hdm *dev = _dev;
	struct dim_channel *buffer[DMA_CHANNELS + 1];
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	dim_service_ahb_int_irq(get_active_channels(dev, buffer));
	spin_unlock_irqrestore(&dim_lock, flags);

	dim2_tasklet.data = (unsigned long)dev;
	tasklet_schedule(&dim2_tasklet);
	return IRQ_HANDLED;
}

/**
 * complete_all_mbos - complete MBO's in a list
 * @head: list head
 *
 * Delete all entries from the list and return the MBO's to mostcore using
 * the completion callback, with status MBO_E_CLOSE.
 */
static void complete_all_mbos(struct list_head *head)
{
	unsigned long flags;
	struct mbo *mbo;

	for (;;) {
		spin_lock_irqsave(&dim_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&dim_lock, flags);
			break;
		}

		mbo = list_first_entry(head, struct mbo, list);
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);

		mbo->processed_length = 0;
		mbo->status = MBO_E_CLOSE;
		mbo->complete(mbo);
	}
}

/**
 * configure_channel - initialize a channel
 * @most_iface: interface the channel belongs to
 * @ch_idx: channel to be configured
 * @ccfg: structure that holds the configuration information
 *
 * Receives configuration information from mostcore and initializes the
 * corresponding channel. Return 0 on success, negative on failure.
 */
static int configure_channel(struct most_interface *most_iface, int ch_idx,
			     struct most_channel_config *ccfg)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	bool const is_tx = ccfg->direction == MOST_CH_TX;
	u16 const sub_size = ccfg->subbuffer_size;
	u16 const buf_size = ccfg->buffer_size;
	u16 new_size;
	unsigned long flags;
	u8 hal_ret;
	int const ch_addr = ch_idx * 2 + 2;
	struct hdm_channel *const hdm_ch = dev->hch + ch_idx;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (hdm_ch->is_initialized)
		return -EPERM;

	/* do not reset if the property was set by user, see poison_channel */
	hdm_ch->reset_dbr_size = ccfg->dbr_size ? NULL : &ccfg->dbr_size;

	/* zero value is default dbr_size, see dim2 hal */
	hdm_ch->ch.dbr_size = ccfg->dbr_size;

	switch (ccfg->data_type) {
	case MOST_CH_CONTROL:
		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
		if (new_size == 0) {
			pr_err("%s: buffer size too small\n", hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_control(&hdm_ch->ch, is_tx, ch_addr,
					   is_tx ? new_size * 2 : new_size);
		break;
	case MOST_CH_ASYNC:
		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
		if (new_size == 0) {
			pr_err("%s: buffer size too small\n", hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_async(&hdm_ch->ch, is_tx, ch_addr,
					 is_tx ? new_size * 2 : new_size);
		break;
	case MOST_CH_ISOC:
		new_size = dim_norm_isoc_buffer_size(buf_size, sub_size);
		if (new_size == 0) {
			pr_err("%s: invalid sub-buffer size or buffer size too small\n",
			       hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_isoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
		break;
	case MOST_CH_SYNC:
		new_size = dim_norm_sync_buffer_size(buf_size, sub_size);
		if (new_size == 0) {
			pr_err("%s: invalid sub-buffer size or buffer size too small\n",
			       hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_sync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
		break;
	default:
		pr_err("%s: configure failed, bad channel type: %d\n",
		       hdm_ch->name, ccfg->data_type);
		return -EINVAL;
	}

	if (hal_ret != DIM_NO_ERROR) {
		spin_unlock_irqrestore(&dim_lock, flags);
		pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
		       hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
		return -ENODEV;
	}

	hdm_ch->data_type = ccfg->data_type;
	hdm_ch->direction = ccfg->direction;
	hdm_ch->is_initialized = true;

	if (hdm_ch->data_type == MOST_CH_ASYNC &&
	    hdm_ch->direction == MOST_CH_TX &&
	    dev->atx_idx < 0)
		dev->atx_idx = ch_idx;

	spin_unlock_irqrestore(&dim_lock, flags);
	ccfg->dbr_size = hdm_ch->ch.dbr_size;

	return 0;
}

/**
 * enqueue - enqueue a buffer for data transfer
 * @most_iface: intended interface
 * @ch_idx: ID of the channel the buffer is intended for
 * @mbo: pointer to the buffer object
 *
 * Push the buffer into pending_list and try to transfer one buffer from
 * pending_list. Return 0 on success, negative on failure.
 */
static int enqueue(struct most_interface *most_iface, int ch_idx,
		   struct mbo *mbo)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	unsigned long flags;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (!hdm_ch->is_initialized)
		return -EPERM;

	if (mbo->bus_address == 0)
		return -EFAULT;

	spin_lock_irqsave(&dim_lock, flags);
	list_add_tail(&mbo->list, &hdm_ch->pending_list);
	spin_unlock_irqrestore(&dim_lock, flags);

	(void)try_start_dim_transfer(hdm_ch);

	return 0;
}

/**
 * request_netinfo - triggers retrieving of network info
 * @most_iface: pointer to the interface
 * @ch_idx: corresponding channel ID
 * @on_netinfo: call-back used to deliver network status to mostcore
 *
 * Send a command to the INIC that triggers retrieving of network info by
 * means of "Message exchange over MDP/MEP".
 */
static void request_netinfo(struct most_interface *most_iface, int ch_idx,
			    void (*on_netinfo)(struct most_interface *,
					       unsigned char, unsigned char *))
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct mbo *mbo;
	u8 *data;

	dev->on_netinfo = on_netinfo;
	if (!on_netinfo)
		return;

	if (dev->atx_idx < 0) {
		pr_err("async tx channel is not initialized\n");
		return;
	}

	mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
	if (!mbo)
		return;

	mbo->buffer_length = 5;

	data = mbo->virt_address;

	data[0] = 0x00; /* PML High byte */
	data[1] = 0x03; /* PML Low byte */
	data[2] = 0x02; /* PMHL */
	data[3] = 0x08; /* FPH */
	data[4] = 0x40; /* FMF (FIFO cmd msg - triggers NAOverMDP) */

	most_submit_mbo(mbo);
}

/**
 * poison_channel - poison buffers of a channel
 * @most_iface: pointer to the interface the channel belongs to
 * @ch_idx: corresponding channel ID
 *
 * Destroy a channel and complete all the buffers in both started_list &
 * pending_list. Return 0 on success, negative on failure.
 */
static int poison_channel(struct most_interface *most_iface, int ch_idx)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	unsigned long flags;
	u8 hal_ret;
	int ret = 0;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (!hdm_ch->is_initialized)
		return -EPERM;

	tasklet_disable(&dim2_tasklet);
	spin_lock_irqsave(&dim_lock, flags);
	hal_ret = dim_destroy_channel(&hdm_ch->ch);
	hdm_ch->is_initialized = false;
	if (ch_idx == dev->atx_idx)
		dev->atx_idx = -1;
	spin_unlock_irqrestore(&dim_lock, flags);
	tasklet_enable(&dim2_tasklet);
	if (hal_ret != DIM_NO_ERROR) {
		pr_err("HAL failed to close channel %s\n", hdm_ch->name);
		ret = -EFAULT;
	}

	complete_all_mbos(&hdm_ch->started_list);
	complete_all_mbos(&hdm_ch->pending_list);
	if (hdm_ch->reset_dbr_size)
		*hdm_ch->reset_dbr_size = 0;

	return ret;
}

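/*
 * dma_alloc()/dma_free() let mostcore allocate and release DMA-coherent
 * buffers against this interface's platform device.
 */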
static void *dma_alloc(struct mbo *mbo, u32 size)
{
	struct device *dev = mbo->ifp->driver_dev;

	return dma_alloc_coherent(dev, size, &mbo->bus_address, GFP_KERNEL);
}

static void dma_free(struct mbo *mbo, u32 size)
{
	struct device *dev = mbo->ifp->driver_dev;

	dma_free_coherent(dev, size, mbo->virt_address, mbo->bus_address);
}

static const struct of_device_id dim2_of_match[];

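/* map of the "microchip,clock-speed" DT strings to the HAL clock speed IDs */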
static struct {
	const char *clock_speed;
	u8 clk_speed;
} clk_mt[] = {
	{ "256fs", CLK_256FS },
	{ "512fs", CLK_512FS },
	{ "1024fs", CLK_1024FS },
	{ "2048fs", CLK_2048FS },
	{ "3072fs", CLK_3072FS },
	{ "4096fs", CLK_4096FS },
	{ "6144fs", CLK_6144FS },
	{ "8192fs", CLK_8192FS },
};

/*
 * get_dim2_clk_speed - converts string to DIM2 clock speed value
 *
 * @clock_speed: string in the format "{NUMBER}fs"
 * @val: pointer to get one of the CLK_{NUMBER}FS values
 *
 * On success stores one of the CLK_{NUMBER}FS in *val and returns 0,
 * otherwise returns -EINVAL.
 */
static int get_dim2_clk_speed(const char *clock_speed, u8 *val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_mt); i++) {
		if (!strcmp(clock_speed, clk_mt[i].clock_speed)) {
			*val = clk_mt[i].clk_speed;
			return 0;
		}
	}
	return -EINVAL;
}
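
/*
 * A sketch of a device tree node consumed by this driver.  The node name,
 * unit address and the reg/interrupts values are hypothetical; the
 * compatible strings and the "microchip,clock-speed" property are the ones
 * matched and parsed by this file:
 *
 *	mlb: mlb@ec520000 {
 *		compatible = "rcar,medialb-dim2";
 *		reg = <0xec520000 0x800>;
 *		interrupts = <0 384 4>,		// MLB interrupt
 *			     <0 385 4>;		// AHB0 interrupt
 *		microchip,clock-speed = "1024fs";
 *	};
 */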

/*
 * dim2_release - release the HDM instance
 * @d: device being released
 *
 * Stop the netinfo thread, shut down the HAL, run the platform-specific
 * disable hook and free the private data.
 */
static void dim2_release(struct device *d)
{
	struct dim2_hdm *dev = container_of(d, struct dim2_hdm, dev);
	unsigned long flags;

	kthread_stop(dev->netinfo_task);

	spin_lock_irqsave(&dim_lock, flags);
	dim_shutdown();
	spin_unlock_irqrestore(&dim_lock, flags);

	if (dev->disable_platform)
		dev->disable_platform(to_platform_device(d->parent));

	kfree(dev);
}

/*
 * dim2_probe - dim2 probe handler
 * @pdev: platform device structure
 *
 * Register the dim2 interface with mostcore and initialize it.
 * Return 0 on success, negative on failure.
 */
static int dim2_probe(struct platform_device *pdev)
{
	const struct dim2_platform_data *pdata;
	const struct of_device_id *of_id;
	const char *clock_speed;
	struct dim2_hdm *dev;
	struct resource *res;
	int ret, i;
	u8 hal_ret;
	u8 dev_fcnt = fcnt;
	int irq;

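	/* platform IRQ resource indexes: MLB interrupt first, then AHB0 */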
	enum { MLB_INT_IDX, AHB0_INT_IDX };

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->atx_idx = -1;

	platform_set_drvdata(pdev, dev);

	ret = of_property_read_string(pdev->dev.of_node,
				      "microchip,clock-speed", &clock_speed);
	if (ret) {
		dev_err(&pdev->dev, "missing dt property clock-speed\n");
		goto err_free_dev;
	}

	ret = get_dim2_clk_speed(clock_speed, &dev->clk_speed);
	if (ret) {
		dev_err(&pdev->dev, "bad dt property clock-speed\n");
		goto err_free_dev;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->io_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->io_base)) {
		ret = PTR_ERR(dev->io_base);
		goto err_free_dev;
	}

	of_id = of_match_node(dim2_of_match, pdev->dev.of_node);
	pdata = of_id->data;
	if (pdata) {
		if (pdata->enable) {
			ret = pdata->enable(pdev);
			if (ret)
				goto err_free_dev;
		}
		dev->disable_platform = pdata->disable;
		if (pdata->fcnt)
			dev_fcnt = pdata->fcnt;
	}

	dev_info(&pdev->dev, "sync: num of frames per sub-buffer: %u\n",
		 dev_fcnt);
	hal_ret = dim_startup(dev->io_base, dev->clk_speed, dev_fcnt);
	if (hal_ret != DIM_NO_ERROR) {
		dev_err(&pdev->dev, "dim_startup failed: %d\n", hal_ret);
		ret = -ENODEV;
		goto err_disable_platform;
	}

	irq = platform_get_irq(pdev, AHB0_INT_IDX);
	if (irq < 0) {
		ret = irq;
		goto err_shutdown_dim;
	}

	ret = devm_request_irq(&pdev->dev, irq, dim2_ahb_isr, 0,
			       "dim2_ahb0_int", dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
		goto err_shutdown_dim;
	}

	irq = platform_get_irq(pdev, MLB_INT_IDX);
	if (irq < 0) {
		ret = irq;
		goto err_shutdown_dim;
	}

	ret = devm_request_irq(&pdev->dev, irq, dim2_mlb_isr, 0,
			       "dim2_mlb_int", dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request mlb_int irq %d\n", irq);
		goto err_shutdown_dim;
	}

	init_waitqueue_head(&dev->netinfo_waitq);
	dev->deliver_netinfo = 0;
	dev->netinfo_task = kthread_run(&deliver_netinfo_thread, dev,
					"dim2_netinfo");
	if (IS_ERR(dev->netinfo_task)) {
		ret = PTR_ERR(dev->netinfo_task);
		goto err_shutdown_dim;
	}

	for (i = 0; i < DMA_CHANNELS; i++) {
		struct most_channel_capability *cap = dev->capabilities + i;
		struct hdm_channel *hdm_ch = dev->hch + i;

		INIT_LIST_HEAD(&hdm_ch->pending_list);
		INIT_LIST_HEAD(&hdm_ch->started_list);
		hdm_ch->is_initialized = false;
		snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);

		cap->name_suffix = hdm_ch->name;
		cap->direction = MOST_CH_RX | MOST_CH_TX;
		cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
				 MOST_CH_ISOC | MOST_CH_SYNC;
		cap->num_buffers_packet = MAX_BUFFERS_PACKET;
		cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
		cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
		cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
	}

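	/*
	 * Compose the interface name from the register base address; the
	 * format string is chosen to match the width of resource_size_t on
	 * the running architecture.
	 */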
	{
		const char *fmt;

		if (sizeof(res->start) == sizeof(long long))
			fmt = "dim2-%016llx";
		else if (sizeof(res->start) == sizeof(long))
			fmt = "dim2-%016lx";
		else
			fmt = "dim2-%016x";

		snprintf(dev->name, sizeof(dev->name), fmt, res->start);
	}

	dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
	dev->most_iface.description = dev->name;
	dev->most_iface.num_channels = DMA_CHANNELS;
	dev->most_iface.channel_vector = dev->capabilities;
	dev->most_iface.configure = configure_channel;
	dev->most_iface.enqueue = enqueue;
	dev->most_iface.dma_alloc = dma_alloc;
	dev->most_iface.dma_free = dma_free;
	dev->most_iface.poison_channel = poison_channel;
	dev->most_iface.request_netinfo = request_netinfo;
	dev->most_iface.driver_dev = &pdev->dev;
	dev->most_iface.dev = &dev->dev;
	dev->dev.init_name = dev->name;
	dev->dev.parent = &pdev->dev;
	dev->dev.release = dim2_release;

	return most_register_interface(&dev->most_iface);

err_shutdown_dim:
	dim_shutdown();
err_disable_platform:
	if (dev->disable_platform)
		dev->disable_platform(pdev);
err_free_dev:
	kfree(dev);

	return ret;
}

/*
 * dim2_remove - dim2 remove handler
 * @pdev: platform device structure
 *
 * Unregister the interface from mostcore
 */
static int dim2_remove(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	most_deregister_interface(&dev->most_iface);

	return 0;
}

/* [[ platform specific functions */

static int fsl_mx6_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, "mlb");
	if (IS_ERR_OR_NULL(dev->clk)) {
		dev_err(&pdev->dev, "unable to get mlb clock\n");
		return -EFAULT;
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk_prepare_enable failed\n");
		return ret;
	}

	if (dev->clk_speed >= CLK_2048FS) {
		/* enable pll */
		dev->clk_pll = devm_clk_get(&pdev->dev, "pll8_mlb");
		if (IS_ERR_OR_NULL(dev->clk_pll)) {
			dev_err(&pdev->dev, "unable to get mlb pll clock\n");
			clk_disable_unprepare(dev->clk);
			return -EFAULT;
		}

		writel(0x888, dev->io_base + 0x38);
		clk_prepare_enable(dev->clk_pll);
	}

	return 0;
}

static void fsl_mx6_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	if (dev->clk_speed >= CLK_2048FS)
		clk_disable_unprepare(dev->clk_pll);

	clk_disable_unprepare(dev->clk);
}

static int rcar_h2_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		return PTR_ERR(dev->clk);
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk_prepare_enable failed\n");
		return ret;
	}

	if (dev->clk_speed >= CLK_2048FS) {
		/* enable MLP pll and LVDS drivers */
		writel(0x03, dev->io_base + 0x600);
		/* set bias */
		writel(0x888, dev->io_base + 0x38);
	} else {
		/* PLL */
		writel(0x04, dev->io_base + 0x600);
	}

	/* BBCR = 0b11 */
	writel(0x03, dev->io_base + 0x500);
	writel(0x0002FF02, dev->io_base + 0x508);

	return 0;
}

static void rcar_h2_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	clk_disable_unprepare(dev->clk);

	/* disable PLLs and LVDS drivers */
	writel(0x0, dev->io_base + 0x600);
}

static int rcar_m3_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	u32 enable_512fs = dev->clk_speed == CLK_512FS;
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		return PTR_ERR(dev->clk);
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk_prepare_enable failed\n");
		return ret;
	}

	/* PLL */
	writel(0x04, dev->io_base + 0x600);

	writel(enable_512fs, dev->io_base + 0x604);

	/* BBCR = 0b11 */
	writel(0x03, dev->io_base + 0x500);
	writel(0x0002FF02, dev->io_base + 0x508);

	return 0;
}

static void rcar_m3_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	clk_disable_unprepare(dev->clk);

	/* disable PLLs and LVDS drivers */
	writel(0x0, dev->io_base + 0x600);
}

/* ]] platform specific functions */

enum dim2_platforms { FSL_MX6, RCAR_H2, RCAR_M3 };

static struct dim2_platform_data plat_data[] = {
	[FSL_MX6] = {
		.enable = fsl_mx6_enable,
		.disable = fsl_mx6_disable,
	},
	[RCAR_H2] = {
		.enable = rcar_h2_enable,
		.disable = rcar_h2_disable,
	},
	[RCAR_M3] = {
		.enable = rcar_m3_enable,
		.disable = rcar_m3_disable,
		.fcnt = 3,
	},
};

static const struct of_device_id dim2_of_match[] = {
	{
		.compatible = "fsl,imx6q-mlb150",
		.data = plat_data + FSL_MX6
	},
	{
		.compatible = "renesas,mlp",
		.data = plat_data + RCAR_H2
	},
	{
		.compatible = "rcar,medialb-dim2",
		.data = plat_data + RCAR_M3
	},
	{
		.compatible = "xlnx,axi4-os62420_3pin-1.00.a",
	},
	{
		.compatible = "xlnx,axi4-os62420_6pin-1.00.a",
	},
	{},
};

MODULE_DEVICE_TABLE(of, dim2_of_match);

static struct platform_driver dim2_driver = {
	.probe = dim2_probe,
	.remove = dim2_remove,
	.driver = {
		.name = "hdm_dim2",
		.of_match_table = dim2_of_match,
		.dev_groups = dim2_groups,
	},
};

module_platform_driver(dim2_driver);

MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
MODULE_LICENSE("GPL");