/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

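/* For masking out/in 2 bit channel positions */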
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

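/* Maximum iterations taken before giving up suspending a channel */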
#define D40_SUSPEND_MAX_IT 500

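/* Milliseconds */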
#define DMA40_AUTOSUSPEND_DELAY	100

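/* Hardware requirement on LCLA alignment */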
#define LCLA_ALIGNMENT 0x40000

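/* Max number of links per event group */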
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

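/* Max number of logical channels per physical channel */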
#define D40_MAX_LOG_CHAN_PER_PHY 32

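/* Attempts before giving up to trying to get pages that are aligned */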
#define MAX_LCLA_ALLOC_ATTEMPTS 256

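/* Bit markings for allocation map */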
#define D40_ALLOC_FREE		BIT(31)
#define D40_ALLOC_PHY		BIT(30)
#define D40_ALLOC_LOG_FREE	0

#define D40_MEMCPY_MAX_CHANS	8

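/* Reserved event lines for memcpy only. */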
#define DB8500_DMA_MEMCPY_EV_0	51
#define DB8500_DMA_MEMCPY_EV_1	56
#define DB8500_DMA_MEMCPY_EV_2	57
#define DB8500_DMA_MEMCPY_EV_3	58
#define DB8500_DMA_MEMCPY_EV_4	59
#define DB8500_DMA_MEMCPY_EV_5	60

static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_0,
	DB8500_DMA_MEMCPY_EV_1,
	DB8500_DMA_MEMCPY_EV_2,
	DB8500_DMA_MEMCPY_EV_3,
	DB8500_DMA_MEMCPY_EV_4,
	DB8500_DMA_MEMCPY_EV_5,
};

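/* Default configuration for physical memcpy */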
static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

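/* Default configuration for logical memcpy */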
static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

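/*
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */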
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

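/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request for suspending an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */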
enum d40_events {
	D40_DEACTIVATE_EVENTLINE = 0,
	D40_ACTIVATE_EVENTLINE = 1,
	D40_SUSPEND_REQ_EVENTLINE = 2,
	D40_ROUND_EVENTLINE = 3
};

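/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 */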
static u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

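/*
 * since 9540 and 8540 has the same HW revision
 * use v4a for 9540 or earlier
 * use v4b for 8540 or later
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 */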
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

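/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */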
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,   0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false,  32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false,  64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false,  96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,    0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,   32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,   64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,   96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};

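/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */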
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
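	/* Clock every part of the DMA block from start */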
	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

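	/* Interrupts on all logical channels */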
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};

static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
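	/* Clock every part of the DMA block from start */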
	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

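	/* Interrupts on all logical channels */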
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

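/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when lli_len > 1, otherwise NULL.
 * @size: The size in bytes of the memory at base or size of pre_alloc_lli.
 * @dma_addr: DMA address, if mapped
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */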
struct d40_lli_pool {
	void *base;
	int size;
	dma_addr_t dma_addr;
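	/* Space for dst and src, plus an extra for padding */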
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

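/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst point into
 * the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if lli_len is 1.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used for among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 */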
struct d40_desc {
	struct d40_phy_lli_bidir lli_phy;
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_current;
	int lcla_alloc;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	bool is_in_client_list;
	bool cyclic;
};

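/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map of which LCLA entry is owned by which job.
 */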
struct d40_lcla_pool {
	void *base;
	dma_addr_t dma_addr;
	void *base_unaligned;
	int pages;
	spinlock_t lock;
	struct d40_desc **alloc_map;
};

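/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_src and allocated_dst use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 */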
struct d40_phy_res {
	spinlock_t lock;
	bool reserved;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
	bool use_soft_lli;
};

struct d40_base;

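/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */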
struct d40_chan {
	spinlock_t lock;
	int log_num;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head pending_queue;
	struct list_head active;
	struct list_head done;
	struct list_head queue;
	struct list_head prepare_queue;
	struct stedma40_chan_cfg dma_cfg;
	bool configured;
	struct d40_base *base;
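	/* Default register configurations */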
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_log_lli_full *lcpa;
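	/* Runtime reconfiguration */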
	dma_addr_t runtime_addr;
	enum dma_transfer_direction runtime_direction;
};

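/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers we need to save/restore. This is
 * depending on the amount of registers.
 * @backup_size: the size of the registers we need to save/restore.
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of dma_init_reg array
 */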
struct d40_gen_dmac {
	u32 *backup;
	u32 backup_size;
	u32 realtime_en;
	u32 realtime_clear;
	u32 high_prio_en;
	u32 high_prio_clear;
	u32 interrupt_en;
	u32 interrupt_clear;
	struct d40_interrupt_lookup *il;
	u32 il_size;
	struct d40_reg_val *init_reg;
	u32 init_reg_size;
};

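/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_memcpy_chans: The number of channels used for memcpy.
 * @num_phy_chans: The number of physical channels. Read from HW.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_parms: DMA parameters.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel.
 * @lookup_phy_chans: Used to map interrupt number to physical channel.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @initialized: true if the dma has been initialized.
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller.
 */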
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	u8 rev:4;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_memcpy_chans;
	int num_phy_chans;
	int num_log_chans;
	struct device_dma_parameters dma_parms;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	struct regulator *lcpa_regulator;
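	/* Physical half channels */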
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
	u32 reg_val_backup[BACKUP_REGS_SZ];
	u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
	u32 *reg_val_backup_chan;
	u16 gcc_pwr_off_mask;
	bool initialized;
	struct d40_gen_dmac gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...) \
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...) \
	d40_err(chan2dev(d40c), format, ## arg)

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

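	/*
	 * Allocate both src and dst at the same time, therefore the half
	 * channel alignment to the full channel.
	 */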
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
			d40c->base->lcla_pool.alloc_map[idx] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->done);
}

static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

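	/*
	 * We may have partially running cyclic transfers, in case we did not
	 * get enough LCLA entries.
	 */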
	linkback = cyclic && lli_current == 0;

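	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space
	 */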
	if (linkback || (lli_len - lli_current > 1)) {
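		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate a lcla. This is to avoid a HW issue that exists
		 * in some controller during a peripheral to memory transfer
		 * that uses linked lists.
		 */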
		if (!(chan->phy_chan->use_soft_lli &&
		      chan->dma_cfg.dir == DMA_DEV_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

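	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */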
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
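			/* First link goes in both LCPA and LCLA */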
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

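		/*
		 * Write the link into LCLA space, pointing to the next LCLA
		 * entry (or back to the first one for a closed cyclic list).
		 */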
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

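		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram
		 */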
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	if (list_empty(&d40c->done))
		return NULL;

	return list_first_entry(&d40c->done, struct d40_desc, node);
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}

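/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in bytes.
 *
 * Calculate the total number of dma elements required to send the entire sg list.
 */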
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= max_w;

	if (!IS_ALIGNED(size, max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

#ifdef CONFIG_PM
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
			 u32 *regaddr, int num, bool save)
{
	int i;

	for (i = 0; i < num; i++) {
		void __iomem *addr = baseaddr + regaddr[i];

		if (save)
			backup[i] = readl_relaxed(addr);
		else
			writel_relaxed(backup[i], addr);
	}
}

static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	int i;

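	/* Save/Restore channel specific registers */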
	for (i = 0; i < base->num_phy_chans; i++) {
		void __iomem *addr;
		int idx;

		if (base->phy_res[i].reserved)
			continue;

		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
		idx = i * ARRAY_SIZE(d40_backup_regs_chan);

		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
	}

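	/* Save/Restore global registers */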
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);

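	/* Save/Restore registers only existing on dma40 v3 and later */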
	if (base->gen_dmac.backup)
		dma40_backup(base->virtbase, base->reg_val_backup_v4,
			     base->gen_dmac.backup,
			     base->gen_dmac.backup_size,
			     save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
}
#endif

static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
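			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */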
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

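	/* Release completed descriptors */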
	while ((d40d = d40_first_done(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

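	/* Release active descriptors */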
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

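	/* Release queued descriptors waiting for transfer */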
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

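	/* Release pending descriptors */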
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

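	/* Release client owned descriptors */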
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

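	/* Release descriptors in prepare queue */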
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}

static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			  D40_EVENTLINE_POS(event);

		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				  D40_EVENTLINE_POS(event);

			cpu_relax();
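			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */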
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to stop the event_line chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
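		/*
		 * The hardware sometimes doesn't register the enable when src
		 * and dst event lines are active on the same logical channel.
		 * Retry to ensure it does. Usually only one retry is
		 * sufficient.
		 */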
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
			       ~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;
	}
}

static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

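	/* Odd addresses are even addresses + 4 */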
	addr_base = (d40c->phy_chan->num % 2) * 4;
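	/* Setup channel mode to logical or physical */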
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
	      D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

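	/* Setup operational mode option register */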
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

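		/* Set default config for CFG reg */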
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

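		/* Set LIDX for lcla */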
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

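		/* Clear LNK which will be used by d40_chan_has_events() */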
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			  >> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);

		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * d40c->dma_cfg.dst_info.data_width;
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	pm_runtime_get_sync(d40c->base->dev);
	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

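	/* If bytes left to transfer or linked tx resume job */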
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

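	/* Start queued jobs, if any */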
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

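		/* Remove from queue */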
		d40_desc_remove(d40d);

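		/* Add to active queue */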
		d40_desc_submit(d40c, d40d);

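		/* Initiate DMA job */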
		d40_desc_load(d40c, d40d);

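		/* Start dma job */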
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

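	/* Get first active entry from list */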
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
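		/*
		 * If this was a partially loaded list, we need to reload it,
		 * but only once the hardware has consumed what was loaded so
		 * far (no pending link and no residue left).
		 */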
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);

			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL) {
			d40c->busy = false;

			pm_runtime_mark_last_busy(d40c->base->dev);
			pm_runtime_put_autosuspend(d40c->base->dev);
		}

		d40_desc_remove(d40d);
		d40_desc_done(d40c, d40d);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

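	/* Get first entry from the done list */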
	d40d = d40_first_done(d40c);
	if (d40d == NULL) {
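		/* Check if we have reached here for cyclic job */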
		d40d = d40_first_active_get(d40c);
		if (d40d == NULL || !d40d->cyclic)
			goto err;
	}

	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

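	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */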
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

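	/* Callback to client */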
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	int i;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;
	u32 regs[base->gen_dmac.il_size];
	struct d40_interrupt_lookup *il = base->gen_dmac.il;
	u32 il_size = base->gen_dmac.il_size;

	spin_lock_irqsave(&base->interrupt_lock, flags);

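	/* Read interrupt status of both logical and physical channels */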
	for (i = 0; i < il_size; i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {
		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * il_size, chan + 1);

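		/* No more set bits found? */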
		if (chan == BITS_PER_LONG * il_size)
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];

		if (!d40c) {
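			/*
			 * No error because this can happen if something else
			 * in the system is using the channel.
			 */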
			continue;
		}

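		/* ACK interrupt */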
		writel(BIT(idx), base->virtbase + il[row].clr);

		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
	    (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
	    (conf->dev_type < 0)) {
		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
		res = -EINVAL;
	}

	if (conf->dir == DMA_DEV_TO_DEV) {
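		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */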
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    conf->src_info.data_width !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    conf->dst_info.data_width) {
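		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */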
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy,
			       bool is_src, int log_event_line, bool is_log,
			       bool *first_user)
{
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	*first_user = ((phy->allocated_src | phy->allocated_dst)
		       == D40_ALLOC_FREE);

	if (!is_log) {
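		/* Physical interrupts are masked per physical full channel */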
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

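	/* Logical channel */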
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & BIT(log_event_line))) {
			phy->allocated_src |= BIT(log_event_line);
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & BIT(log_event_line))) {
			phy->allocated_dst |= BIT(log_event_line);
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	if (is_src) {
		phy->allocated_src &= ~BIT(log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~BIT(log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
	int dev_type = d40c->dma_cfg.dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	int num_phy_chans;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;
	num_phy_chans = d40c->base->num_phy_chans;

	if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
		   d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {

		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
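			/* Find physical half channel */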
			if (d40c->dma_cfg.use_fixed_channel) {
				i = d40c->dma_cfg.phy_channel;
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
					goto found_phy;
			} else {
				for (i = 0; i < num_phy_chans; i++) {
					if (d40_alloc_mask_set(&phys[i], is_src,
							       0, is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

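	/* Find logical channel */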
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;

		if (d40c->dma_cfg.use_fixed_channel) {
			i = d40c->dma_cfg.phy_channel;

			if ((i != phy_num) && (i != phy_num + 1)) {
				dev_err(chan2dev(d40c),
					"invalid fixed phy channel %d\n", i);
				return -EINVAL;
			}

			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
					       is_log, first_phy_user))
				goto found_log;

			dev_err(chan2dev(d40c),
				"could not allocate fixed phy channel %d\n", i);
			return -EINVAL;
		}

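		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */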
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = dma40_memcpy_conf_log;
		d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];

		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = dma40_memcpy_conf_phy;

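		/* Generate interrupt at end of transfer or relink. */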
		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);

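		/* Generate interrupt on error. */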
		d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);

	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

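	/* Terminate all queued and active transfers */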
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
		is_src = false;
	else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
		is_src = true;
	else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	pm_runtime_get_sync(d40c->base->dev);
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "stop failed\n");
		goto out;
	}

	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);

	if (chan_is_logical(d40c))
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
	else
		d40c->base->lookup_phy_chans[phy->num] = NULL;

	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->busy = false;
	d40c->phy_chan = NULL;
	d40c->configured = false;
out:
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	return res;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}

static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}

static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}

static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_transfer_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	if (direction == DMA_DEV_TO_MEM)
		src_dev_addr = chan->runtime_addr;
	else if (direction == DMA_MEM_TO_DEV)
		dst_dev_addr = chan->runtime_addr;

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

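	/*
	 * add descriptor to the prepare queue in order to be able
	 * to free them later in terminate_all
	 */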
	list_add_tail(&desc->node, &chan->prepare_queue);

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);

static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 rtreg;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = BIT(event);
	u32 prioreg;
	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;

	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;

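	/*
	 * Destination event lines that are wired to memory (i.e. the
	 * destination side of a logical channel) can never be high priority.
	 */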
	if (!src && chan_is_logical(d40c))
		highprio = false;

	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;

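	/* Destination event lines are in the upper halfword */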
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}

static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);

	if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
}

#define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
#define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)

static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
				  struct of_dma *ofdma)
{
	struct stedma40_chan_cfg cfg;
	dma_cap_mask_t cap;
	u32 flags;

	memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	cfg.dev_type = dma_spec->args[0];
	flags = dma_spec->args[2];

	switch (D40_DT_FLAGS_MODE(flags)) {
	case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
	case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
	}

	switch (D40_DT_FLAGS_DIR(flags)) {
	case 0:
		cfg.dir = DMA_MEM_TO_DEV;
		cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
		break;
	case 1:
		cfg.dir = DMA_DEV_TO_MEM;
		cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
		break;
	}

	if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
		cfg.phy_channel = dma_spec->args[1];
		cfg.use_fixed_channel = true;
	}

	return dma_request_channel(cap, stedma40_filter, &cfg);
}

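/* DMA ENGINE functions */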
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	dma_cookie_init(chan);

2465 if (!d40c->configured) {
2466 err = d40_config_memcpy(d40c);
2467 if (err) {
2468 chan_err(d40c, "Failed to configure memcpy channel\n");
2469 goto fail;
2470 }
2471 }
2472
2473 err = d40_allocate_channel(d40c, &is_free_phy);
2474 if (err) {
2475 chan_err(d40c, "Failed to allocate channel\n");
2476 d40c->configured = false;
2477 goto fail;
2478 }
2479
2480 pm_runtime_get_sync(d40c->base->dev);
2481
2482 d40_set_prio_realtime(d40c);
2483
2484 if (chan_is_logical(d40c)) {
2485 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2486 d40c->lcpa = d40c->base->lcpa_base +
2487 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2488 else
2489 d40c->lcpa = d40c->base->lcpa_base +
2490 d40c->dma_cfg.dev_type *
2491 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2492
2493
2494 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2495 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2496 }
2497
2498 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2499 chan_is_logical(d40c) ? "logical" : "physical",
2500 d40c->phy_chan->num,
2501 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2502
2503
2504
2505
2506
2507
2508
2509 if (is_free_phy)
2510 d40_config_write(d40c);
2511fail:
2512 pm_runtime_mark_last_busy(d40c->base->dev);
2513 pm_runtime_put_autosuspend(d40c->base->dev);
2514 spin_unlock_irqrestore(&d40c->lock, flags);
2515 return err;
2516}

static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}

static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}

static struct dma_async_tx_descriptor *
d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction direction,
		  unsigned long dma_flags, void *context)
{
	if (!is_slave_direction(direction))
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}

static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		      size_t buf_len, size_t period_len,
		      enum dma_transfer_direction direction, unsigned long flags,
		      void *context)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	if (!sg)
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	/*
	 * Make the scatterlist circular: mark the extra entry as a chain
	 * entry (bit 0 set, bit 1 clear) pointing back to the first one,
	 * so the transfer wraps around for ever.
	 */
	sg[periods].offset = 0;
	sg_dma_len(&sg[periods]) = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}
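
/*
 * Illustrative client usage of the cyclic API (not part of this driver);
 * buf, buf_len and period are assumed to come from the client's ring
 * buffer setup and chan from dma_request_channel():
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period,
 *					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		txd->callback = my_period_done;	// hypothetical callback
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */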

static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	enum dma_status ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE)
		dma_set_residue(txstate, stedma40_residue(chan));

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;

	return ret;
}

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

	/* Busy means that queued jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

static void d40_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int ret;

	spin_lock_irqsave(&d40c->lock, flags);

	pm_runtime_get_sync(d40c->base->dev);
	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (ret)
		chan_err(d40c, "Failed to stop channel\n");

	d40_term_all(d40c);
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	if (d40c->busy) {
		/* Drop the reference taken when the channel became busy */
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}
	d40c->busy = false;

	spin_unlock_irqrestore(&d40c->lock, flags);
}

static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
			    struct stedma40_half_channel_info *info,
			    u32 maxburst)
{
	int psize;

	if (chan_is_logical(d40c)) {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}

	info->psize = psize;
	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	return 0;
}
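
/*
 * For reference, dma40_config_to_halfchannel() rounds the requested
 * maxburst down to the nearest burst size the hardware supports:
 *
 *	maxburst 1..3   -> STEDMA40_PSIZE_{PHY,LOG}_1
 *	maxburst 4..7   -> STEDMA40_PSIZE_{PHY,LOG}_4
 *	maxburst 8..15  -> STEDMA40_PSIZE_{PHY,LOG}_8
 *	maxburst >= 16  -> STEDMA40_PSIZE_{PHY,LOG}_16
 */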

/* Runtime reconfiguration extension */
static int d40_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	dma_addr_t config_addr;
	u32 src_maxburst, dst_maxburst;
	int ret;

	src_addr_width = config->src_addr_width;
	src_maxburst = config->src_maxburst;
	dst_addr_width = config->dst_addr_width;
	dst_maxburst = config->dst_maxburst;

	if (config->direction == DMA_DEV_TO_MEM) {
		config_addr = config->src_addr;

		if (cfg->dir != DMA_DEV_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = DMA_DEV_TO_MEM;

		/* Configure the memory side */
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = src_addr_width;
		if (dst_maxburst == 0)
			dst_maxburst = src_maxburst;

	} else if (config->direction == DMA_MEM_TO_DEV) {
		config_addr = config->dst_addr;

		if (cfg->dir != DMA_MEM_TO_DEV)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = DMA_MEM_TO_DEV;

		/* Configure the memory side */
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = dst_addr_width;
		if (src_maxburst == 0)
			src_maxburst = dst_maxburst;
	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return -EINVAL;
	}

	if (config_addr <= 0) {
		dev_err(d40c->base->dev, "no address supplied\n");
		return -EINVAL;
	}

	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
		dev_err(d40c->base->dev,
			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
			src_maxburst,
			src_addr_width,
			dst_maxburst,
			dst_addr_width);
		return -EINVAL;
	}

	if (src_maxburst > 16) {
		src_maxburst = 16;
		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
	} else if (dst_maxburst > 16) {
		dst_maxburst = 16;
		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
	}

	/* Only valid widths are: 1, 2, 4 and 8 */
	if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
	    src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
	    dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    !is_power_of_2(src_addr_width) ||
	    !is_power_of_2(dst_addr_width))
		return -EINVAL;

	cfg->src_info.data_width = src_addr_width;
	cfg->dst_info.data_width = dst_addr_width;

	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
					  src_maxburst);
	if (ret)
		return ret;

	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
					  dst_maxburst);
	if (ret)
		return ret;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d/%d, "
		"maxburst %d/%d elements, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		src_addr_width, dst_addr_width,
		src_maxburst, dst_maxburst);

	return 0;
}
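
/*
 * Illustrative client-side slave configuration (not part of this driver);
 * fifo_addr is a hypothetical device FIFO address:
 *
 *	struct dma_slave_config conf = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &conf);
 *
 * With no source side given, d40_set_runtime_config() mirrors the
 * destination width and maxburst onto the memory side.
 */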

static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		d40_terminate_all(chan);
		return 0;
	case DMA_PAUSE:
		return d40_pause(d40c);
	case DMA_RESUME:
		return d40_resume(d40c);
	case DMA_SLAVE_CONFIG:
		return d40_set_runtime_config(chan,
					      (struct dma_slave_config *) arg);
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->done);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->pending_queue);
		INIT_LIST_HEAD(&d40c->client);
		INIT_LIST_HEAD(&d40c->prepare_queue);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}

static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
		dev->device_prep_slave_sg = d40_prep_slave_sg;

	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
		dev->device_prep_dma_memcpy = d40_prep_memcpy;

		/*
		 * This controller can only access address at even
		 * 32bit boundaries, i.e. 2^2
		 */
		dev->copy_align = 2;
	}

	if (dma_has_cap(DMA_SG, dev->cap_mask))
		dev->device_prep_dma_sg = d40_prep_memcpy_sg;

	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
	dev->device_free_chan_resources = d40_free_chan_resources;
	dev->device_issue_pending = d40_issue_pending;
	dev->device_tx_status = d40_tx_status;
	dev->device_control = d40_control;
	dev->dev = base->dev;
}

static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_slave);

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->num_memcpy_chans);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	d40_ops_init(base, &base->dma_memcpy);

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);

	d40_ops_init(base, &base->dma_both);
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}

/* Suspend resume functionality */
#ifdef CONFIG_PM
static int dma40_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (base->lcpa_regulator)
		ret = regulator_disable(base->lcpa_regulator);
	return ret;
}

static int dma40_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	d40_save_restore_registers(base, true);

	/* Don't disable/enable clocks for v1 due to HW bugs */
	if (base->rev != 1)
		writel_relaxed(base->gcc_pwr_off_mask,
			       base->virtbase + D40_DREG_GCC);

	return 0;
}

static int dma40_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	if (base->initialized)
		d40_save_restore_registers(base, false);

	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
		       base->virtbase + D40_DREG_GCC);
	return 0;
}

static int dma40_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (base->lcpa_regulator)
		ret = regulator_enable(base->lcpa_regulator);

	return ret;
}

static const struct dev_pm_ops dma40_pm_ops = {
	.suspend		= dma40_pm_suspend,
	.runtime_suspend	= dma40_runtime_suspend,
	.runtime_resume		= dma40_runtime_resume,
	.resume			= dma40_resume,
};
#define DMA40_PM_OPS	(&dma40_pm_ops)
#else
#define DMA40_PM_OPS	NULL
#endif

/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;
	int gcc = D40_DREG_GCC_ENA;

	/* Each physical channel uses two bits in PRSME (even) / PRSMO (odd) */
	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
			base->phy_res[i].reserved = true;
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_SRC);
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_DST);

		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			base->phy_res[i].reserved = false;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		base->phy_res[chan].reserved = true;
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_SRC);
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_DST);
		num_phy_chans_avail--;
	}

	/* Mark soft_lli channels */
	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
		int chan = base->plat_data->soft_lli_chans[i];

		base->phy_res[chan].use_soft_lli = true;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	/*
	 * To keep things simple, enable all clocks initially.
	 * The clocks will get managed later, after channel allocation.
	 * The clocks for the event lines on which reserved channels exist
	 * are not managed here.
	 */
	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
	base->gcc_pwr_off_mask = gcc;

	return num_phy_chans_avail;
}

static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int num_memcpy_chans;
	int clk_ret = -EINVAL;
	int i;
	u32 pid;
	u32 cid;
	u8 rev;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto failure;
	}

	clk_ret = clk_prepare_enable(clk);
	if (clk_ret) {
		d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
		goto failure;
	}

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* This is just a regular AMBA PrimeCell ID at the end of the window */
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
			& 255) << (i * 8);
	for (cid = 0, i = 0; i < 4; i++)
		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
			& 255) << (i * 8);

	if (cid != AMBA_CID) {
		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
		goto failure;
	}
	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			AMBA_MANF_BITS(pid),
			AMBA_VENDOR_ST);
		goto failure;
	}

	/*
	 * HW revision:
	 * DB8500ed has revision 0
	 * DB8500v1 has revision 2
	 * DB8500v2 has revision 3
	 * AP9540v1 has revision 4
	 * DB8540v1 has revision 4
	 */
	rev = AMBA_REV_BITS(pid);
	if (rev < 2) {
		d40_err(&pdev->dev, "hardware revision: %d is not supported\n",
			rev);
		goto failure;
	}

	/* The number of physical channels on this HW */
	if (plat_data->num_of_phy_chans)
		num_phy_chans = plat_data->num_of_phy_chans;
	else
		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	/* The number of channels used for memcpy */
	if (plat_data->num_of_memcpy_chans)
		num_memcpy_chans = plat_data->num_of_memcpy_chans;
	else
		num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);

	num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;

	dev_info(&pdev->dev,
		 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
		 rev, &res->start, num_phy_chans, num_log_chans);

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + num_memcpy_chans) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_memcpy_chans = num_memcpy_chans;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	/* 14 physical channels means the v4b register layout (e.g. DB8540) */
	if (base->plat_data->num_of_phy_chans == 14) {
		base->gen_dmac.backup = d40_backup_regs_v4b;
		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
		base->gen_dmac.il = il_v4b;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
		base->gen_dmac.init_reg = dma_init_reg_v4b;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
	} else {
		if (base->rev >= 3) {
			base->gen_dmac.backup = d40_backup_regs_v4a;
			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
		}
		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
		base->gen_dmac.il = il_v4a;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
		base->gen_dmac.init_reg = dma_init_reg_v4a;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
	}

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	base->lookup_log_chans = kzalloc(num_log_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_log_chans)
		goto failure;

	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
					    sizeof(d40_backup_regs_chan),
					    GFP_KERNEL);
	if (!base->reg_val_backup_chan)
		goto failure;

	base->lcla_pool.alloc_map =
		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!clk_ret)
		clk_disable_unprepare(clk);
	if (!IS_ERR(clk))
		clk_put(clk);
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->reg_val_backup_chan);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}

static void __init d40_hw_init(struct d40_base *base)
{
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;
	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
	u32 reg_size = base->gen_dmac.init_reg_size;

	for (i = 0; i < reg_size; i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;

	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);

	/* These are __initdata and cannot be accessed after init */
	base->gen_dmac.init_reg = NULL;
	base->gen_dmac.init_reg_size = 0;
}

static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * The LCLA area must be LCLA_ALIGNMENT (256 KiB) aligned. Rather
	 * than permanently wasting memory on an over-sized allocation,
	 * allocate blocks of pages until one happens to be aligned.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculating how many pages that are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);
			ret = -ENOMEM;

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	/* Free the allocations that did not line up */
	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts there was no luck with finding the
		 * correct alignment, so instead allocate a bigger buffer
		 * and align the base within it.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
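
/*
 * Sizing note (illustrative arithmetic, assuming a DB8500-style setup
 * with 8 physical channels and 4 KiB pages): the LCLA pool needs
 * SZ_1K * 8 = 8 KiB, so lcla_pool.pages evaluates to 2. That value is
 * passed to __get_free_pages() as an allocation *order*, i.e. an
 * order-2 block of 16 KiB, which comfortably covers the 8 KiB pool.
 */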

static int __init d40_of_probe(struct platform_device *pdev,
			       struct device_node *np)
{
	struct stedma40_platform_data *pdata;
	int num_phy = 0, num_memcpy = 0, num_disabled = 0;
	const __be32 *list;

	pdata = devm_kzalloc(&pdev->dev,
			     sizeof(struct stedma40_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* If absent this value will be obtained from h/w. */
	of_property_read_u32(np, "dma-channels", &num_phy);
	if (num_phy > 0)
		pdata->num_of_phy_chans = num_phy;

	list = of_get_property(np, "memcpy-channels", &num_memcpy);
	num_memcpy /= sizeof(*list);

	if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
		d40_err(&pdev->dev,
			"Invalid number of memcpy channels specified (%d)\n",
			num_memcpy);
		return -EINVAL;
	}
	pdata->num_of_memcpy_chans = num_memcpy;

	of_property_read_u32_array(np, "memcpy-channels",
				   dma40_memcpy_channels,
				   num_memcpy);

	list = of_get_property(np, "disabled-channels", &num_disabled);
	num_disabled /= sizeof(*list);

	if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
		d40_err(&pdev->dev,
			"Invalid number of disabled channels specified (%d)\n",
			num_disabled);
		return -EINVAL;
	}

	of_property_read_u32_array(np, "disabled-channels",
				   pdata->disabled_channels,
				   num_disabled);
	pdata->disabled_channels[num_disabled] = -1;

	pdev->dev.platform_data = pdata;

	return 0;
}
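
/*
 * Illustrative device tree node consumed by d40_of_probe() (values are
 * examples patterned after the DB8500; consult the stericsson,dma40
 * binding for the authoritative format):
 *
 *	dma: dma-controller@801C0000 {
 *		compatible = "stericsson,db8500-dma40", "stericsson,dma40";
 *		reg = <0x801C0000 0x1000>, <0x40010000 0x800>;
 *		reg-names = "base", "lcpa";
 *		#dma-cells = <3>;
 *		memcpy-channels = <56 57 58 59 60>;
 *	};
 */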

static int __init d40_probe(struct platform_device *pdev)
{
	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	int ret = -ENOENT;
	struct d40_base *base = NULL;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	if (!plat_data) {
		if (np) {
			if (d40_of_probe(pdev, np)) {
				ret = -ENOMEM;
				goto failure;
			}
		} else {
			d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
			goto failure;
		}
	}

	base = d40_hw_detect_init(pdev);
	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
		goto failure;
	}

	/* If the hardware already uses an LCPA base, warn on a mismatch */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
			 __func__, val, &res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}

	/* If lcla has to be located in ESRAM we don't need to allocate */
	if (base->plat_data->use_esram_lcla) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
		if (!res) {
			ret = -ENOENT;
			d40_err(&pdev->dev,
				"No \"lcla_esram\" memory resource\n");
			goto failure;
		}
		base->lcla_pool.base = ioremap(res->start,
					       resource_size(res));
		if (!base->lcla_pool.base) {
			ret = -ENOMEM;
			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
			goto failure;
		}
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	} else {
		ret = d40_lcla_allocate(base);
		if (ret) {
			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
			goto failure;
		}
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_enable(base->dev);
	pm_runtime_resume(base->dev);

	if (base->plat_data->use_esram_lcla) {

		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
			ret = PTR_ERR(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}

		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(&pdev->dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}
	}

	base->initialized = true;
	ret = d40_dmaengine_init(base, num_reserved_chans);
	if (ret)
		goto failure;

	base->dev->dma_parms = &base->dma_parms;
	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
	if (ret) {
		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
		goto failure;
	}

	d40_hw_init(base);

	if (np) {
		ret = of_dma_controller_register(np, d40_xlate, NULL);
		if (ret)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
			iounmap(base->lcla_pool.base);
			base->lcla_pool.base = NULL;
		}

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable_unprepare(base->clk);
			clk_put(base->clk);
		}

		if (base->lcpa_regulator) {
			regulator_disable(base->lcpa_regulator);
			regulator_put(base->lcpa_regulator);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->reg_val_backup_chan);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}

static const struct of_device_id d40_match[] = {
	{ .compatible = "stericsson,dma40", },
	{}
};

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
		.pm = DMA40_PM_OPS,
		.of_match_table = d40_match,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);