// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		BIT(31)
#define D40_ALLOC_PHY		BIT(30)
#define D40_ALLOC_LOG_FREE	0

#define D40_MEMCPY_MAX_CHANS	8

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0	51
#define DB8500_DMA_MEMCPY_EV_1	56
#define DB8500_DMA_MEMCPY_EV_2	57
#define DB8500_DMA_MEMCPY_EV_3	58
#define DB8500_DMA_MEMCPY_EV_4	59
#define DB8500_DMA_MEMCPY_EV_5	60

static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_0,
	DB8500_DMA_MEMCPY_EV_1,
	DB8500_DMA_MEMCPY_EV_2,
	DB8500_DMA_MEMCPY_EV_3,
	DB8500_DMA_MEMCPY_EV_4,
	DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Requesting suspension of an event line.
 * @D40_ROUND_EVENTLINE: Status check for an event line.
 */
enum d40_events {
	D40_DEACTIVATE_EVENTLINE	= 0,
	D40_ACTIVATE_EVENTLINE		= 1,
	D40_SUSPEND_REQ_EVENTLINE	= 2,
	D40_ROUND_EVENTLINE		= 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hardware is powered off.
 */
static __maybe_unused u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * Registers specific to the v4a hardware variant that have to be saved
 * and restored across a power-off.
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

/* Registers specific to the v4b hardware variant. */
static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

/* Per-channel registers saved and restored across a power-off. */
static __maybe_unused u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
			    BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If it equals
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,   0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false,  32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false,  64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false,  96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,    0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,   32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,   64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,   96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};

static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,     .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/*
 * struct d40_lli_pool - Pool of LLIs for one descriptor. @base points to a
 * kmalloc'd area when the transfer needs more LLIs than fit in
 * @pre_alloc_lli (the common one-src/one-dst case); @dma_addr is the DMA
 * mapping of that area, if mapped, and @size is its size in bytes.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	dma_addr_t	dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/*
 * struct d40_desc - One DMA job (descriptor), used for both logical and
 * physical transfers. @lli_len is the number of LLIs in the descriptor,
 * @lli_current the number already transferred, and @lcla_alloc the number
 * of LCLA entries currently allocated to it.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};

/*
 * struct d40_lcla_pool - LCLA pool settings and data. @base is the virtual
 * address of the LCLA area (256 KiB aligned, as the hardware requires);
 * @base_unaligned keeps the original allocation pointer for freeing;
 * @alloc_map records which descriptor currently owns each LCLA entry and
 * is protected by @lock.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 dma_addr;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	struct d40_desc	**alloc_map;
};

/*
 * struct d40_phy_res - One entry per physical channel. @allocated_src and
 * @allocated_dst are bitmaps over the event lines mapped to this channel,
 * using the D40_ALLOC_* markers for the whole-channel and free states;
 * @use_soft_lli marks channels whose linked lists are managed by software.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool	   reserved;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
	bool	   use_soft_lli;
};

struct d40_base;

/*
 * struct d40_chan - Struct that describes a channel, logical as well as
 * physical. @log_num is the logical channel number, or D40_PHY_CHAN when
 * the channel runs in physical mode. The list heads track descriptors
 * through their lifecycle: prepare_queue, pending_queue, queue, active and
 * done, plus @client for descriptors awaiting acknowledgement.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 pending_queue;
	struct list_head		 active;
	struct list_head		 done;
	struct list_head		 queue;
	struct list_head		 prepare_queue;
	struct stedma40_chan_cfg	 dma_cfg;
	struct dma_slave_config		 slave_config;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			runtime_addr;
	enum dma_transfer_direction	runtime_direction;
};

/*
 * struct d40_gen_dmac - Generic values that differ between the v4a and
 * v4b hardware variants: register backup lists, realtime and high-priority
 * enable/clear registers, interrupt registers and the init register table
 * used at probe.
 */
struct d40_gen_dmac {
	u32				*backup;
	u32				 backup_size;
	u32				 realtime_en;
	u32				 realtime_clear;
	u32				 high_prio_en;
	u32				 high_prio_clear;
	u32				 interrupt_en;
	u32				 interrupt_clear;
	struct d40_interrupt_lookup	*il;
	u32				 il_size;
	struct d40_reg_val		*init_reg;
	u32				 init_reg_size;
};

/*
 * struct d40_base - The big global struct, one for each probe'd instance.
 * Holds the register mapping, clock, channel bookkeeping arrays, the
 * LCPA/LCLA memory areas, the register backup buffers used across a
 * power-off and the generic (v4a/v4b) DMAC description.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_memcpy_chans;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct device_dma_parameters	  dma_parms;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	struct regulator		 *lcpa_regulator;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
	u32				  reg_val_backup[BACKUP_REGS_SZ];
	u32				  reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
	u32				 *reg_val_backup_chan;
	u32				 *regs_interrupt;
	u16				  gcc_pwr_off_mask;
	struct d40_gen_dmac		  gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)		\
	d40_err(chan2dev(d40c), format, ## arg)

static int d40_set_runtime_config_write(struct dma_chan *chan,
					struct dma_slave_config *config,
					enum dma_transfer_direction direction);

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

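/*
 * Reserve one free LCLA link slot in this channel's event group for the
 * descriptor. Returns the slot index, or -EINVAL if all slots are in use.
 */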
static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/*
	 * Allocate both src and dst at the same time; hence the halves
	 * start at 1, since 0 is used as the end-of-link marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
			d40c->base->lcla_pool.alloc_map[idx] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		/* Recycle an acked descriptor from the client list */
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->done);
}

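/*
 * Write the links of a logical-channel descriptor to the hardware: the
 * first link goes into LCPA, any further links are chained through LCLA
 * entries allocated with d40_lcla_alloc_one().
 */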
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we did not
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space.
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate a lcla. This is to avoid a HW issue that exists
		 * in some controller during a peripheral to memory transfer
		 * that uses linked lists.
		 */
		if (!(chan->phy_chan->use_soft_lli &&
		      chan->dma_cfg.dir == DMA_DEV_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need
	 * to link it to the second LCLA and not the first. However, if we
	 * could not even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto set_current;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram.
		 */
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}
 set_current:
	desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
}

/* Remove the descriptor from its current queue and add it to pending */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
					node);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}

/*
 * The dma only supports transferring packages up to
 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in bytes.
 *
 * Calculate the total number of dma elements required to send the entire
 * sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= max_w;

	if (!IS_ALIGNED(size, max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

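/*
 * Issue a command (run/stop/suspend) directly on the physical channel.
 * For suspend requests the hardware is polled until it reports the
 * channel as stopped or suspended.
 */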
static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			  D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto unlock;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				  D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}
	}
 unlock:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

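/* Free every descriptor on the channel, whatever lifecycle list it is on. */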
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release completed descriptors */
	while ((d40d = d40_first_done(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}

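/*
 * Activate, deactivate or suspend a single event line via the channel's
 * source (SSLNK) or destination (SDLNK) link register.
 */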
static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			  D40_EVENTLINE_POS(event);

		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				  D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to stop the event_line chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
	/*
	 * The hw sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel. Retry to
	 * ensure it does. Usually only one retry is sufficient.
	 */
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
			       ~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;

	}
}

static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				 D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);

		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * d40c->dma_cfg.dst_info.data_width;
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

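/* Handle a terminal-count interrupt; called with the channel lock held. */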
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload it
		 * now, but only when the list is completed. We need to check
		 * for done because the interrupt will hit for every link, and
		 * not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL) {
			d40c->busy = false;

			pm_runtime_mark_last_busy(d40c->base->dev);
			pm_runtime_put_autosuspend(d40c->base->dev);
		}

		d40_desc_remove(d40d);
		d40_desc_done(d40c, d40d);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	bool callback_active;
	struct dmaengine_desc_callback cb;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first entry from the done list */
	d40d = d40_first_done(d40c);
	if (d40d == NULL) {
		/* Check if we have reached here for a cyclic job */
		d40d = d40_first_active_get(d40c);
		if (d40d == NULL || !d40d->cyclic)
			goto check_pending_tx;
	}

	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the
	 * client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
	dmaengine_desc_get_callback(&d40d->txd, &cb);

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback_active)
		dmaengine_desc_callback_invoke(&cb, NULL);

	return;
 check_pending_tx:
	/* Rescue maneuver in case of double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

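/* Interrupt handler: scan all status registers and dispatch per channel. */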
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	int i;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;
	u32 *regs = base->regs_interrupt;
	struct d40_interrupt_lookup *il = base->gen_dmac.il;
	u32 il_size = base->gen_dmac.il_size;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < il_size; i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * il_size, chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * il_size)
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];

		if (!d40c) {
			/*
			 * No error because this can happen if something else
			 * in the system is using the channel.
			 */
			continue;
		}

		/* ACK interrupt */
		writel(BIT(idx), base->virtbase + il[row].clr);

		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
	    (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
	    (conf->dev_type < 0)) {
		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
		res = -EINVAL;
	}

	if (conf->dir == DMA_DEV_TO_DEV) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    conf->src_info.data_width !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    conf->dst_info.data_width) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}

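/*
 * Try to claim a physical channel, or one of its event lines when is_log
 * is set, in the allocation masks. Returns true on success; *first_user
 * tells whether the physical channel was previously completely free.
 */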
static bool d40_alloc_mask_set(struct d40_phy_res *phy,
			       bool is_src, int log_event_line, bool is_log,
			       bool *first_user)
{
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	*first_user = ((phy->allocated_src | phy->allocated_dst)
			== D40_ALLOC_FREE);

	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found_unlock;
		} else
			goto not_found_unlock;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found_unlock;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & BIT(log_event_line))) {
			phy->allocated_src |= BIT(log_event_line);
			goto found_unlock;
		} else
			goto not_found_unlock;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found_unlock;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & BIT(log_event_line))) {
			phy->allocated_dst |= BIT(log_event_line);
			goto found_unlock;
		}
	}
 not_found_unlock:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
 found_unlock:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto unlock;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~BIT(log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~BIT(log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);
 unlock:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

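/*
 * Find and claim a physical channel (and, for logical mode, an event line
 * on it) that matches the configuration in d40c->dma_cfg.
 */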
static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
	int dev_type = d40c->dma_cfg.dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	int num_phy_chans;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;
	num_phy_chans = d40c->base->num_phy_chans;

	if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
		   d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
			/* Find physical half channel */
			if (d40c->dma_cfg.use_fixed_channel) {
				i = d40c->dma_cfg.phy_channel;
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
					goto found_phy;
			} else {
				for (i = 0; i < num_phy_chans; i++) {
					if (d40_alloc_mask_set(&phys[i], is_src,
							       0, is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;

		if (d40c->dma_cfg.use_fixed_channel) {
			i = d40c->dma_cfg.phy_channel;

			if ((i != phy_num) && (i != phy_num + 1)) {
				dev_err(chan2dev(d40c),
					"invalid fixed phy channel %d\n", i);
				return -EINVAL;
			}

			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
					       is_log, first_phy_user))
				goto found_log;

			dev_err(chan2dev(d40c),
				"could not allocate fixed phy channel %d\n", i);
			return -EINVAL;
		}

		/*
		 * Spread logical channels across all available physical
		 * channels rather than packing every logical channel onto
		 * the first available one.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = dma40_memcpy_conf_log;
		d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];

		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = dma40_memcpy_conf_phy;

		/* Generate interrupt at end of transfer or relink. */
		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);

		/* Generate interrupt on error. */
		d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);

	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}

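/* Stop the channel, release its event line(s) and return it to the pool. */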
static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
		is_src = false;
	else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
		is_src = true;
	else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	pm_runtime_get_sync(d40c->base->dev);
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "stop failed\n");
		goto mark_last_busy;
	}

	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);

	if (chan_is_logical(d40c))
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
	else
		d40c->base->lookup_phy_chans[phy->num] = NULL;

	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->busy = false;
	d40c->phy_chan = NULL;
	d40c->configured = false;
 mark_last_busy:
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	return res;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			  D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;
		goto unlock;
	}

	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto unlock;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
 unlock:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}

static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}

static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	cfg = &chan->dma_cfg;
	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto free_desc;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto free_desc;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;
 free_desc:
	d40_desc_free(chan, desc);
	return NULL;
}

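/*
 * Common preparation path for memcpy, slave and cyclic transfers:
 * allocate a descriptor, build its LLIs and park it on the prepare
 * queue until it is submitted.
 */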
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_transfer_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	d40_set_runtime_config_write(dchan, &chan->slave_config, direction);

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto unlock;

	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	src_dev_addr = 0;
	dst_dev_addr = 0;
	if (direction == DMA_DEV_TO_MEM)
		src_dev_addr = chan->runtime_addr;
	else if (direction == DMA_MEM_TO_DEV)
		dst_dev_addr = chan->runtime_addr;

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto free_desc;
	}

	/*
	 * add descriptor to the prepare queue in order to be able
	 * to free them later in terminate_all
	 */
	list_add_tail(&desc->node, &chan->prepare_queue);

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;
 free_desc:
	d40_desc_free(chan, desc);
 unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);

static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 rtreg;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = BIT(event);
	u32 prioreg;
	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;

	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
	/*
	 * Due to a hardware bug, in some cases a logical channel triggered by
	 * a high priority destination event line can generate extra packet
	 * transactions.
	 *
	 * The workaround is to not set the high priority level for the
	 * destination event lines that trigger logical channels.
	 */
	if (!src && chan_is_logical(d40c))
		highprio = false;

	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}

static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);

	if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
}

#define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
#define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
#define D40_DT_FLAGS_HIGH_PRIO(flags)  ((flags >> 4) & 0x1)

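/* Translate a device tree dma-spec into a channel request. */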
static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
				  struct of_dma *ofdma)
{
	struct stedma40_chan_cfg cfg;
	dma_cap_mask_t cap;
	u32 flags;

	memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	cfg.dev_type = dma_spec->args[0];
	flags = dma_spec->args[2];

	switch (D40_DT_FLAGS_MODE(flags)) {
	case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
	case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
	}

	switch (D40_DT_FLAGS_DIR(flags)) {
	case 0:
		cfg.dir = DMA_MEM_TO_DEV;
		cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
		break;
	case 1:
		cfg.dir = DMA_DEV_TO_MEM;
		cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
		break;
	}

	if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
		cfg.phy_channel = dma_spec->args[1];
		cfg.use_fixed_channel = true;
	}

	if (D40_DT_FLAGS_HIGH_PRIO(flags))
		cfg.high_priority = true;

	return dma_request_channel(cap, stedma40_filter, &cfg);
}

/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	dma_cookie_init(chan);

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto mark_last_busy;
		}
	}

	err = d40_allocate_channel(d40c, &is_free_phy);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		d40c->configured = false;
		goto mark_last_busy;
	}

	pm_runtime_get_sync(d40c->base->dev);

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;

		/* Unmask the Global Interrupt Mask. */
		d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
	}

	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
		chan_is_logical(d40c) ? "logical" : "physical",
		d40c->phy_chan->num,
		d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");

	/*
	 * Only write the channel configuration the first time a physical
	 * channel is taken into use; logical channels sharing it must not
	 * overwrite the configuration already in use.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
 mark_last_busy:
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}

static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
			   DMA_MEM_TO_MEM, dma_flags);
}
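
/*
 * Illustrative client-side usage (not part of this driver): a memcpy on
 * one of these channels goes through the generic dmaengine API, roughly:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * dst_phys, src_phys and len are placeholders for caller-provided DMA
 * addresses and transfer size.
 */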

static struct dma_async_tx_descriptor *
d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction direction,
		  unsigned long dma_flags, void *context)
{
	if (!is_slave_direction(direction))
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}

static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		      size_t buf_len, size_t period_len,
		      enum dma_transfer_direction direction, unsigned long flags)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	if (!sg)
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	/* Chain the list back onto itself to make the transfer cyclic */
	sg_chain(sg, periods + 1, sg);

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}
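
/*
 * Illustrative only: clients (e.g. audio drivers) typically set up such a
 * cyclic transfer as
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *					period_len, DMA_DEV_TO_MEM, 0);
 *
 * where buf_len should be an exact multiple of period_len so that the
 * period count computed above covers the whole buffer.
 */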

static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	enum dma_status ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE && txstate)
		dma_set_residue(txstate, stedma40_residue(chan));

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;

	return ret;
}
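
/*
 * Note on residue reporting: since residue_granularity is
 * DMA_RESIDUE_GRANULARITY_BURST (see d40_ops_init() below), a client can
 * poll the progress of a live transfer, illustratively:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	// state.residue holds the bytes not yet transferred
 */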

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

	/* Busy means that queued jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

static int d40_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	pm_runtime_get_sync(d40c->base->dev);
	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (ret)
		chan_err(d40c, "Failed to stop channel\n");

	d40_term_all(d40c);
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	if (d40c->busy) {
		/* Drop the extra reference taken when the channel went busy */
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}
	d40c->busy = false;

	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}

static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
			    struct stedma40_half_channel_info *info,
			    u32 maxburst)
{
	int psize;

	if (chan_is_logical(d40c)) {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}

	info->psize = psize;
	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	return 0;
}
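
/*
 * The mapping above rounds the requested maxburst down to the nearest
 * supported packet size:
 *
 *	maxburst 1..3	-> PSIZE_1
 *	maxburst 4..7	-> PSIZE_4
 *	maxburst 8..15	-> PSIZE_8
 *	maxburst >= 16	-> PSIZE_16
 */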

static int d40_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	/* Cache the config; it is applied when a transfer is prepared */
	memcpy(&d40c->slave_config, config, sizeof(*config));

	return 0;
}

/* Runtime reconfiguration extension */
static int d40_set_runtime_config_write(struct dma_chan *chan,
					struct dma_slave_config *config,
					enum dma_transfer_direction direction)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	dma_addr_t config_addr;
	u32 src_maxburst, dst_maxburst;
	int ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	src_addr_width = config->src_addr_width;
	src_maxburst = config->src_maxburst;
	dst_addr_width = config->dst_addr_width;
	dst_maxburst = config->dst_maxburst;

	if (direction == DMA_DEV_TO_MEM) {
		config_addr = config->src_addr;

		if (cfg->dir != DMA_DEV_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = DMA_DEV_TO_MEM;

		/* Configure the memory side */
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = src_addr_width;
		if (dst_maxburst == 0)
			dst_maxburst = src_maxburst;

	} else if (direction == DMA_MEM_TO_DEV) {
		config_addr = config->dst_addr;

		if (cfg->dir != DMA_MEM_TO_DEV)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = DMA_MEM_TO_DEV;

		/* Configure the memory side */
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = dst_addr_width;
		if (src_maxburst == 0)
			src_maxburst = dst_maxburst;
	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			direction);
		return -EINVAL;
	}

	if (config_addr <= 0) {
		dev_err(d40c->base->dev, "no address supplied\n");
		return -EINVAL;
	}

	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
		dev_err(d40c->base->dev,
			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
			src_maxburst,
			src_addr_width,
			dst_maxburst,
			dst_addr_width);
		return -EINVAL;
	}

	if (src_maxburst > 16) {
		src_maxburst = 16;
		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
	} else if (dst_maxburst > 16) {
		dst_maxburst = 16;
		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
	}

	/* Only valid widths are: 1, 2, 4 and 8 bytes */
	if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
	    src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
	    dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    !is_power_of_2(src_addr_width) ||
	    !is_power_of_2(dst_addr_width))
		return -EINVAL;

	cfg->src_info.data_width = src_addr_width;
	cfg->dst_info.data_width = dst_addr_width;

	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
					  src_maxburst);
	if (ret)
		return ret;

	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
					  dst_maxburst);
	if (ret)
		return ret;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d/%d, "
		"maxburst %d/%d elements, LE, no flow control\n",
		dma_chan_name(chan),
		(direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		src_addr_width, dst_addr_width,
		src_maxburst, dst_maxburst);

	return 0;
}
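
/*
 * The width/maxburst check above enforces equal bandwidth on both half
 * channels: src_maxburst * src_addr_width == dst_maxburst * dst_addr_width.
 * Worked example: a 4-byte-wide source bursting 4 words moves 16 bytes, so
 * a 1-byte-wide destination must burst 16 (4 * 4 == 16 * 1); a request of,
 * say, 4 * 4 vs. 8 * 1 is rejected with -EINVAL.
 */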

/* Initialization functions. */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->done);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->pending_queue);
		INIT_LIST_HEAD(&d40c->client);
		INIT_LIST_HEAD(&d40c->prepare_queue);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}

static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
	if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
		dev->device_prep_slave_sg = d40_prep_slave_sg;
		dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	}

	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
		dev->device_prep_dma_memcpy = d40_prep_memcpy;
		dev->directions = BIT(DMA_MEM_TO_MEM);
		/*
		 * This controller can only access addresses at even
		 * 32bit boundaries, i.e. 2^2.
		 */
		dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	}

	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
	dev->device_free_chan_resources = d40_free_chan_resources;
	dev->device_issue_pending = d40_issue_pending;
	dev->device_tx_status = d40_tx_status;
	dev->device_config = d40_set_runtime_config;
	dev->device_pause = d40_pause;
	dev->device_resume = d40_resume;
	dev->device_terminate_all = d40_terminate_all;
	dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dev->dev = base->dev;
}

static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_slave);

	err = dmaenginem_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto exit;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->num_memcpy_chans);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

	d40_ops_init(base, &base->dma_memcpy);

	err = dmaenginem_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto exit;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);

	d40_ops_init(base, &base->dma_both);
	err = dmaenginem_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto exit;
	}
	return 0;
 exit:
	return err;
}
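
/*
 * Note: the driver deliberately registers three dma_device instances so
 * that the capabilities stay separated: dma_slave exposes the logical
 * channels for slave/cyclic transfers, dma_memcpy exposes the logical
 * channels reserved for memcpy, and dma_both exposes the reserved physical
 * channels that can do both.
 */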

/* Suspend resume functionality */
#ifdef CONFIG_PM_SLEEP
static int dma40_suspend(struct device *dev)
{
	struct d40_base *base = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret)
		return ret;

	if (base->lcpa_regulator)
		ret = regulator_disable(base->lcpa_regulator);
	return ret;
}

static int dma40_resume(struct device *dev)
{
	struct d40_base *base = dev_get_drvdata(dev);
	int ret = 0;

	if (base->lcpa_regulator) {
		ret = regulator_enable(base->lcpa_regulator);
		if (ret)
			return ret;
	}

	return pm_runtime_force_resume(dev);
}
#endif

#ifdef CONFIG_PM
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
			 u32 *regaddr, int num, bool save)
{
	int i;

	for (i = 0; i < num; i++) {
		void __iomem *addr = baseaddr + regaddr[i];

		if (save)
			backup[i] = readl_relaxed(addr);
		else
			writel_relaxed(backup[i], addr);
	}
}

static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	int i;

	/* Save/Restore channel specific registers */
	for (i = 0; i < base->num_phy_chans; i++) {
		void __iomem *addr;
		int idx;

		if (base->phy_res[i].reserved)
			continue;

		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
		idx = i * ARRAY_SIZE(d40_backup_regs_chan);

		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
	}

	/* Save/Restore global registers */
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);

	/* Save/Restore registers only existing on dma40 v3 and later */
	if (base->gen_dmac.backup)
		dma40_backup(base->virtbase, base->reg_val_backup_v4,
			     base->gen_dmac.backup,
			     base->gen_dmac.backup_size,
			     save);
}

static int dma40_runtime_suspend(struct device *dev)
{
	struct d40_base *base = dev_get_drvdata(dev);

	d40_save_restore_registers(base, true);

	/* Don't disable/enable clocks for v1 due to HW bugs */
	if (base->rev != 1)
		writel_relaxed(base->gcc_pwr_off_mask,
			       base->virtbase + D40_DREG_GCC);

	return 0;
}

static int dma40_runtime_resume(struct device *dev)
{
	struct d40_base *base = dev_get_drvdata(dev);

	d40_save_restore_registers(base, false);

	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
		       base->virtbase + D40_DREG_GCC);
	return 0;
}
#endif

static const struct dev_pm_ops dma40_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
	SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
			   dma40_runtime_resume,
			   NULL)
};

/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;
	int gcc = D40_DREG_GCC_ENA;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
			base->phy_res[i].reserved = true;
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_SRC);
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_DST);

		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			base->phy_res[i].reserved = false;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		base->phy_res[chan].reserved = true;
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_SRC);
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_DST);
		num_phy_chans_avail--;
	}

	/* Mark soft_lli channels */
	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
		int chan = base->plat_data->soft_lli_chans[i];

		base->phy_res[chan].use_soft_lli = true;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	/*
	 * To keep things simple, enable all clocks initially.
	 * The clocks will get managed later, post channel allocation.
	 * The clocks for the event lines on which reserved channels exist
	 * are not managed here.
	 */
	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
	base->gcc_pwr_off_mask = gcc;

	return num_phy_chans_avail;
}
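
/*
 * Each physical channel owns a two-bit field in the PRSME (even channels)
 * or PRSMO (odd channels) security register; the loop above walks
 * odd_even_bit through 0, 0, 2, 2, 4, 4, ... so that, illustratively,
 * channel 5 is decoded from bits 5:4 of PRSMO. A field value of 1 means
 * the channel is reserved for secure mode and is left untouched here.
 */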

static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
	struct clk *clk;
	void __iomem *virtbase;
	struct resource *res;
	struct d40_base *base;
	int num_log_chans;
	int num_phy_chans;
	int num_memcpy_chans;
	int clk_ret = -EINVAL;
	int i;
	u32 pid;
	u32 cid;
	u8 rev;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto check_prepare_enabled;
	}

	clk_ret = clk_prepare_enable(clk);
	if (clk_ret) {
		d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
		goto disable_unprepare;
	}

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto disable_unprepare;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto disable_unprepare;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto release_region;

	/* This is just a regular AMBA PrimeCell ID, read byte by byte */
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
			& 255) << (i * 8);
	for (cid = 0, i = 0; i < 4; i++)
		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
			& 255) << (i * 8);

	if (cid != AMBA_CID) {
		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
		goto unmap_io;
	}
	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			AMBA_MANF_BITS(pid),
			AMBA_VENDOR_ST);
		goto unmap_io;
	}

	/*
	 * HW revision:
	 * DB8500ed has revision 0
	 * ? has revision 1
	 * DB8500v1 has revision 2
	 * DB8500v2 has revision 3
	 * AP9540v1 has revision 4
	 * DB8540v1 has revision 4
	 */
	rev = AMBA_REV_BITS(pid);
	if (rev < 2) {
		d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev);
		goto unmap_io;
	}

	/* The number of physical channels on this HW */
	if (plat_data->num_of_phy_chans)
		num_phy_chans = plat_data->num_of_phy_chans;
	else
		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	/* The number of channels used for memcpy */
	if (plat_data->num_of_memcpy_chans)
		num_memcpy_chans = plat_data->num_of_memcpy_chans;
	else
		num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);

	num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;

	dev_info(&pdev->dev,
		 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
		 rev, &res->start, num_phy_chans, num_log_chans);

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + num_memcpy_chans) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL)
		goto unmap_io;

	base->rev = rev;
	base->clk = clk;
	base->num_memcpy_chans = num_memcpy_chans;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	/* A DMA40 with 14 physical channels uses the v4b register layout */
	if (base->plat_data->num_of_phy_chans == 14) {
		base->gen_dmac.backup = d40_backup_regs_v4b;
		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
		base->gen_dmac.il = il_v4b;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
		base->gen_dmac.init_reg = dma_init_reg_v4b;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
	} else {
		if (base->rev >= 3) {
			base->gen_dmac.backup = d40_backup_regs_v4a;
			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
		}
		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
		base->gen_dmac.il = il_v4a;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
		base->gen_dmac.init_reg = dma_init_reg_v4a;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
	}

	base->phy_res = kcalloc(num_phy_chans,
				sizeof(*base->phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto free_base;

	base->lookup_phy_chans = kcalloc(num_phy_chans,
					 sizeof(*base->lookup_phy_chans),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto free_phy_res;

	base->lookup_log_chans = kcalloc(num_log_chans,
					 sizeof(*base->lookup_log_chans),
					 GFP_KERNEL);
	if (!base->lookup_log_chans)
		goto free_phy_chans;

	base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
						  sizeof(d40_backup_regs_chan),
						  GFP_KERNEL);
	if (!base->reg_val_backup_chan)
		goto free_log_chans;

	base->lcla_pool.alloc_map = kcalloc(num_phy_chans
					    * D40_LCLA_LINK_PER_EVENT_GRP,
					    sizeof(*base->lcla_pool.alloc_map),
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto free_backup_chan;

	base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
					     sizeof(*base->regs_interrupt),
					     GFP_KERNEL);
	if (!base->regs_interrupt)
		goto free_map;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto free_regs;

	return base;
 free_regs:
	kfree(base->regs_interrupt);
 free_map:
	kfree(base->lcla_pool.alloc_map);
 free_backup_chan:
	kfree(base->reg_val_backup_chan);
 free_log_chans:
	kfree(base->lookup_log_chans);
 free_phy_chans:
	kfree(base->lookup_phy_chans);
 free_phy_res:
	kfree(base->phy_res);
 free_base:
	kfree(base);
 unmap_io:
	iounmap(virtbase);
 release_region:
	release_mem_region(res->start, resource_size(res));
 check_prepare_enabled:
	/*
	 * The label below sits inside the if-body on purpose: the clock is
	 * only unprepared when clk_prepare_enable() actually succeeded.
	 */
	if (!clk_ret)
 disable_unprepare:
		clk_disable_unprepare(clk);
	if (!IS_ERR(clk))
		clk_put(clk);
	return NULL;
}

static void __init d40_hw_init(struct d40_base *base)
{
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;
	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
	u32 reg_size = base->gen_dmac.init_reg_size;

	for (i = 0; i < reg_size; i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);

	/* These are __initdata and cannot be accessed after init */
	base->gen_dmac.init_reg = NULL;
	base->gen_dmac.init_reg_size = 0;
}

static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit
	 * aligned. To fulfill this hardware requirement without wasting
	 * 256 KiB, we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
				  sizeof(*page_list),
				  GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {
			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);
			ret = -ENOMEM;

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto free_page_list;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts with no success finding the correct
		 * alignment, try allocating a big buffer instead.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto free_page_list;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto free_page_list;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
	ret = 0;
 free_page_list:
	kfree(page_list);
	return ret;
}
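
/*
 * Worked example of the strategy above, assuming 8 physical channels:
 * the pool needs 8 * SZ_1K = 8 KiB that is LCLA_ALIGNMENT-aligned. Rather
 * than over-allocating a full alignment-sized block up front, the loop
 * grabs 8 KiB chunks until one happens to sit on the required boundary
 * (up to MAX_LCLA_ALLOC_ATTEMPTS tries) and frees the misses; only if that
 * fails does it fall back to the oversized kmalloc() plus PTR_ALIGN().
 */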

static int __init d40_of_probe(struct platform_device *pdev,
			       struct device_node *np)
{
	struct stedma40_platform_data *pdata;
	int num_phy = 0, num_memcpy = 0, num_disabled = 0;
	const __be32 *list;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* If absent this value will be obtained from h/w. */
	of_property_read_u32(np, "dma-channels", &num_phy);
	if (num_phy > 0)
		pdata->num_of_phy_chans = num_phy;

	list = of_get_property(np, "memcpy-channels", &num_memcpy);
	num_memcpy /= sizeof(*list);

	if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
		d40_err(&pdev->dev,
			"Invalid number of memcpy channels specified (%d)\n",
			num_memcpy);
		return -EINVAL;
	}
	pdata->num_of_memcpy_chans = num_memcpy;

	of_property_read_u32_array(np, "memcpy-channels",
				   dma40_memcpy_channels,
				   num_memcpy);

	list = of_get_property(np, "disabled-channels", &num_disabled);
	num_disabled /= sizeof(*list);

	if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
		d40_err(&pdev->dev,
			"Invalid number of disabled channels specified (%d)\n",
			num_disabled);
		return -EINVAL;
	}

	of_property_read_u32_array(np, "disabled-channels",
				   pdata->disabled_channels,
				   num_disabled);
	pdata->disabled_channels[num_disabled] = -1;

	pdev->dev.platform_data = pdata;

	return 0;
}
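
/*
 * Illustrative controller node consumed by d40_of_probe(); all addresses
 * and channel numbers below are placeholders, not taken from any
 * particular board:
 *
 *	dma: dma-controller@801c0000 {
 *		compatible = "stericsson,dma40";
 *		reg = <0x801c0000 0x1000>, <0x40010000 0x800>;
 *		reg-names = "base", "lcpa";
 *		#dma-cells = <3>;
 *		memcpy-channels = <8 9 10>;
 *		disabled-channels = <12>;
 *	};
 */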

static int __init d40_probe(struct platform_device *pdev)
{
	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res;
	int num_reserved_chans;
	u32 val;

	if (!plat_data) {
		if (np) {
			if (d40_of_probe(pdev, np)) {
				ret = -ENOMEM;
				goto report_failure;
			}
		} else {
			d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
			goto report_failure;
		}
	}

	base = d40_hw_detect_init(pdev);
	if (!base)
		goto report_failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto destroy_cache;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
		goto destroy_cache;
	}

	/* Verify that the LCPA address the hardware uses matches the resource */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
			 __func__, val, &res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto destroy_cache;
	}

	/* If the LCLA is located in ESRAM we don't need to allocate it */
	if (base->plat_data->use_esram_lcla) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
		if (!res) {
			ret = -ENOENT;
			d40_err(&pdev->dev,
				"No \"lcla_esram\" memory resource\n");
			goto destroy_cache;
		}
		base->lcla_pool.base = ioremap(res->start,
					       resource_size(res));
		if (!base->lcla_pool.base) {
			ret = -ENOMEM;
			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
			goto destroy_cache;
		}
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	} else {
		ret = d40_lcla_allocate(base);
		if (ret) {
			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
			goto destroy_cache;
		}
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto destroy_cache;
	}

	if (base->plat_data->use_esram_lcla) {

		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
			ret = PTR_ERR(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto destroy_cache;
		}

		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(&pdev->dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto destroy_cache;
		}
	}

	writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);

	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_mark_last_busy(base->dev);
	pm_runtime_set_active(base->dev);
	pm_runtime_enable(base->dev);

	ret = d40_dmaengine_init(base, num_reserved_chans);
	if (ret)
		goto destroy_cache;

	base->dev->dma_parms = &base->dma_parms;
	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
	if (ret) {
		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
		goto destroy_cache;
	}

	d40_hw_init(base);

	if (np) {
		ret = of_dma_controller_register(np, d40_xlate, NULL);
		if (ret)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	dev_info(base->dev, "initialized\n");
	return 0;
 destroy_cache:
	kmem_cache_destroy(base->desc_slab);
	if (base->virtbase)
		iounmap(base->virtbase);

	if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
		iounmap(base->lcla_pool.base);
		base->lcla_pool.base = NULL;
	}

	if (base->lcla_pool.dma_addr)
		dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
				 SZ_1K * base->num_phy_chans,
				 DMA_TO_DEVICE);

	if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
		free_pages((unsigned long)base->lcla_pool.base,
			   base->lcla_pool.pages);

	kfree(base->lcla_pool.base_unaligned);

	if (base->phy_lcpa)
		release_mem_region(base->phy_lcpa,
				   base->lcpa_size);
	if (base->phy_start)
		release_mem_region(base->phy_start,
				   base->phy_size);
	if (base->clk) {
		clk_disable_unprepare(base->clk);
		clk_put(base->clk);
	}

	if (base->lcpa_regulator) {
		regulator_disable(base->lcpa_regulator);
		regulator_put(base->lcpa_regulator);
	}

	kfree(base->lcla_pool.alloc_map);
	kfree(base->lookup_log_chans);
	kfree(base->lookup_phy_chans);
	kfree(base->phy_res);
	kfree(base);
 report_failure:
	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}

static const struct of_device_id d40_match[] = {
	{ .compatible = "stericsson,dma40", },
	{}
};

static struct platform_driver d40_driver = {
	.driver = {
		.name = D40_NAME,
		.pm = &dma40_pm_ops,
		.of_match_table = d40_match,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);