// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"
29
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY 100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts before giving up to trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE     BIT(31)
#define D40_ALLOC_PHY      BIT(30)
#define D40_ALLOC_LOG_FREE 0

#define D40_MEMCPY_MAX_CHANS 8

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0 51
#define DB8500_DMA_MEMCPY_EV_1 56
#define DB8500_DMA_MEMCPY_EV_2 57
#define DB8500_DMA_MEMCPY_EV_3 58
#define DB8500_DMA_MEMCPY_EV_4 59
#define DB8500_DMA_MEMCPY_EV_5 60
71
static int dma40_memcpy_channels[] = {
        DB8500_DMA_MEMCPY_EV_0,
        DB8500_DMA_MEMCPY_EV_1,
        DB8500_DMA_MEMCPY_EV_2,
        DB8500_DMA_MEMCPY_EV_3,
        DB8500_DMA_MEMCPY_EV_4,
        DB8500_DMA_MEMCPY_EV_5,
};
80

/* Default configuration for physical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
        .mode = STEDMA40_MODE_PHYSICAL,
        .dir = DMA_MEM_TO_MEM,

        .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .src_info.psize = STEDMA40_PSIZE_PHY_1,
        .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

        .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .dst_info.psize = STEDMA40_PSIZE_PHY_1,
        .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
94

/* Default configuration for logical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
        .mode = STEDMA40_MODE_LOGICAL,
        .dir = DMA_MEM_TO_MEM,

        .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .src_info.psize = STEDMA40_PSIZE_LOG_1,
        .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

        .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .dst_info.psize = STEDMA40_PSIZE_LOG_1,
        .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
        D40_DMA_STOP        = 0,
        D40_DMA_RUN         = 1,
        D40_DMA_SUSPEND_REQ = 2,
        D40_DMA_SUSPENDED   = 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */
enum d40_events {
        D40_DEACTIVATE_EVENTLINE  = 0,
        D40_ACTIVATE_EVENTLINE    = 1,
        D40_SUSPEND_REQ_EVENTLINE = 2,
        D40_ROUND_EVENTLINE       = 3
};

/*
 * These are the registers that has to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
        D40_DREG_LCPA,
        D40_DREG_LCLA,
        D40_DREG_PRMSE,
        D40_DREG_PRMSO,
        D40_DREG_PRMOE,
        D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * since 9540 and 8540 has the same HW revision
 * use v4a for 9540 or earlier
 * use v4b for 8540 or later
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * AP8540v1 has revision 5
 */
static u32 d40_backup_regs_v4a[] = {
        D40_DREG_PSEG1,
        D40_DREG_PSEG2,
        D40_DREG_PSEG3,
        D40_DREG_PSEG4,
        D40_DREG_PCEG1,
        D40_DREG_PCEG2,
        D40_DREG_PCEG3,
        D40_DREG_PCEG4,
        D40_DREG_RSEG1,
        D40_DREG_RSEG2,
        D40_DREG_RSEG3,
        D40_DREG_RSEG4,
        D40_DREG_RCEG1,
        D40_DREG_RCEG2,
        D40_DREG_RCEG3,
        D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
        D40_DREG_CPSEG1,
        D40_DREG_CPSEG2,
        D40_DREG_CPSEG3,
        D40_DREG_CPSEG4,
        D40_DREG_CPSEG5,
        D40_DREG_CPCEG1,
        D40_DREG_CPCEG2,
        D40_DREG_CPCEG3,
        D40_DREG_CPCEG4,
        D40_DREG_CPCEG5,
        D40_DREG_CRSEG1,
        D40_DREG_CRSEG2,
        D40_DREG_CRSEG3,
        D40_DREG_CRSEG4,
        D40_DREG_CRSEG5,
        D40_DREG_CRCEG1,
        D40_DREG_CRCEG2,
        D40_DREG_CRCEG3,
        D40_DREG_CRCEG4,
        D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
        D40_CHAN_REG_SSCFG,
        D40_CHAN_REG_SSELT,
        D40_CHAN_REG_SSPTR,
        D40_CHAN_REG_SSLNK,
        D40_CHAN_REG_SDCFG,
        D40_CHAN_REG_SDELT,
        D40_CHAN_REG_SDPTR,
        D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
                             BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
        u32 src;
        u32 clr;
        bool is_error;
        int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
        {D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
        {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
        {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
        {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
        {D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
        {D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
        {D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
        {D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
        {D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
        {D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
        {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,   0},
        {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false,  32},
        {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false,  64},
        {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false,  96},
        {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
        {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,    0},
        {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,   32},
        {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,   64},
        {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,   96},
        {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
        {D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
        {D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
        unsigned int reg;
        unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
        /* Clock every part of the DMA block from start */
        { .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

        /* Interrupts on all logical channels */
        { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};

static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
        /* Clock every part of the DMA block from start */
        { .reg = D40_DREG_GCC,     .val = D40_DREG_GCC_ENABLE_ALL},

        /* Interrupts on all logical channels */
        { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when lli_len > 1 if lli_len <= 1 then
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
        void *base;
        int size;
        dma_addr_t dma_addr;
        /* Space for dst and src, plus an extra for padding */
        u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst=
 * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used for among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
        /* LLI physical */
        struct d40_phy_lli_bidir lli_phy;
        /* LLI logical */
        struct d40_log_lli_bidir lli_log;

        struct d40_lli_pool lli_pool;
        int lli_len;
        int lli_current;
        int lcla_alloc;

        struct dma_async_tx_descriptor txd;
        struct list_head node;

        bool is_in_client_list;
        bool cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped
 * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is own by which job.
 */
struct d40_lcla_pool {
        void *base;
        dma_addr_t dma_addr;
        void *base_unaligned;
        int pages;
        spinlock_t lock;
        struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protection this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event line's are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 */
struct d40_phy_res {
        spinlock_t lock;
        bool reserved;
        int num;
        u32 allocated_src;
        u32 allocated_dst;
        bool use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * point is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Cliented owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
        spinlock_t lock;
        int log_num;
        int pending_tx;
        bool busy;
        struct d40_phy_res *phy_chan;
        struct dma_chan chan;
        struct tasklet_struct tasklet;
        struct list_head client;
        struct list_head pending_queue;
        struct list_head active;
        struct list_head done;
        struct list_head queue;
        struct list_head prepare_queue;
        struct stedma40_chan_cfg dma_cfg;
        bool configured;
        struct d40_base *base;
        /* Default register configurations */
        u32 src_def_cfg;
        u32 dst_def_cfg;
        struct d40_def_lcsp log_def;
        struct d40_log_lli_full *lcpa;
        /* Runtime reconfiguration */
        dma_addr_t runtime_addr;
        enum dma_transfer_direction runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u9540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
        u32 *backup;
        u32 backup_size;
        u32 realtime_en;
        u32 realtime_clear;
        u32 high_prio_en;
        u32 high_prio_clear;
        u32 interrupt_en;
        u32 interrupt_clear;
        struct d40_interrupt_lookup *il;
        u32 il_size;
        struct d40_reg_val *init_reg;
        u32 init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_memcpy_chans: The number of channels used for memcpy.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_parms: DMA parameters for the channel
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller
 */
struct d40_base {
        spinlock_t interrupt_lock;
        spinlock_t execmd_lock;
        struct device *dev;
        void __iomem *virtbase;
        u8 rev:4;
        struct clk *clk;
        phys_addr_t phy_start;
        resource_size_t phy_size;
        int irq;
        int num_memcpy_chans;
        int num_phy_chans;
        int num_log_chans;
        struct device_dma_parameters dma_parms;
        struct dma_device dma_both;
        struct dma_device dma_slave;
        struct dma_device dma_memcpy;
        struct d40_chan *phy_chans;
        struct d40_chan *log_chans;
        struct d40_chan **lookup_log_chans;
        struct d40_chan **lookup_phy_chans;
        struct stedma40_platform_data *plat_data;
        struct regulator *lcpa_regulator;
        /* Physical half channels */
        struct d40_phy_res *phy_res;
        struct d40_lcla_pool lcla_pool;
        void *lcpa_base;
        dma_addr_t phy_lcpa;
        resource_size_t lcpa_size;
        struct kmem_cache *desc_slab;
        u32 reg_val_backup[BACKUP_REGS_SZ];
        u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
        u32 *reg_val_backup_chan;
        u16 gcc_pwr_off_mask;
        struct d40_gen_dmac gen_dmac;
};
598
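/* Convenience helper: the struct device of a channel, for use in log messages. */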
599static struct device *chan2dev(struct d40_chan *d40c)
600{
601 return &d40c->chan.dev->device;
602}
603
604static bool chan_is_physical(struct d40_chan *chan)
605{
606 return chan->log_num == D40_PHY_CHAN;
607}
608
609static bool chan_is_logical(struct d40_chan *chan)
610{
611 return !chan_is_physical(chan);
612}
613
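/*
 * Base address of the register window belonging to the physical channel
 * that backs this d40_chan.
 */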
614static void __iomem *chan_base(struct d40_chan *chan)
615{
616 return chan->base->virtbase + D40_DREG_PCBASE +
617 chan->phy_chan->num * D40_DREG_PCDELTA;
618}
619
620#define d40_err(dev, format, arg...) \
621 dev_err(dev, "[%s] " format, __func__, ## arg)
622
623#define chan_err(d40c, format, arg...) \
624 d40_err(chan2dev(d40c), format, ## arg)
625
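/*
 * Allocate LLI memory for one descriptor. A single-link job uses the
 * pre-allocated area in the pool; longer jobs get a kmalloc'd buffer which,
 * for physical channels, is also DMA-mapped towards the controller.
 */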
626static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
627 int lli_len)
628{
629 bool is_log = chan_is_logical(d40c);
630 u32 align;
631 void *base;
632
633 if (is_log)
634 align = sizeof(struct d40_log_lli);
635 else
636 align = sizeof(struct d40_phy_lli);
637
638 if (lli_len == 1) {
639 base = d40d->lli_pool.pre_alloc_lli;
640 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
641 d40d->lli_pool.base = NULL;
642 } else {
643 d40d->lli_pool.size = lli_len * 2 * align;
644
645 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
646 d40d->lli_pool.base = base;
647
648 if (d40d->lli_pool.base == NULL)
649 return -ENOMEM;
650 }
651
652 if (is_log) {
653 d40d->lli_log.src = PTR_ALIGN(base, align);
654 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
655
656 d40d->lli_pool.dma_addr = 0;
657 } else {
658 d40d->lli_phy.src = PTR_ALIGN(base, align);
659 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
660
661 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
662 d40d->lli_phy.src,
663 d40d->lli_pool.size,
664 DMA_TO_DEVICE);
665
666 if (dma_mapping_error(d40c->base->dev,
667 d40d->lli_pool.dma_addr)) {
668 kfree(d40d->lli_pool.base);
669 d40d->lli_pool.base = NULL;
670 d40d->lli_pool.dma_addr = 0;
671 return -ENOMEM;
672 }
673 }
674
675 return 0;
676}
677
678static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
679{
680 if (d40d->lli_pool.dma_addr)
681 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
682 d40d->lli_pool.size, DMA_TO_DEVICE);
683
684 kfree(d40d->lli_pool.base);
685 d40d->lli_pool.base = NULL;
686 d40d->lli_pool.size = 0;
687 d40d->lli_log.src = NULL;
688 d40d->lli_log.dst = NULL;
689 d40d->lli_phy.src = NULL;
690 d40d->lli_phy.dst = NULL;
691}
692
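/*
 * Grab one free LCLA entry for this descriptor within the event group of
 * its physical channel. Returns the entry index or -EINVAL if none is free.
 */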
693static int d40_lcla_alloc_one(struct d40_chan *d40c,
694 struct d40_desc *d40d)
695{
696 unsigned long flags;
697 int i;
698 int ret = -EINVAL;
699
700 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        /*
         * Allocate both src and dst at the same time, therefore the half
         * start on 1 since 0 can't be used since zero is used as end marker.
         */
706 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
707 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
708
709 if (!d40c->base->lcla_pool.alloc_map[idx]) {
710 d40c->base->lcla_pool.alloc_map[idx] = d40d;
711 d40d->lcla_alloc++;
712 ret = i;
713 break;
714 }
715 }
716
717 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
718
719 return ret;
720}
721
722static int d40_lcla_free_all(struct d40_chan *d40c,
723 struct d40_desc *d40d)
724{
725 unsigned long flags;
726 int i;
727 int ret = -EINVAL;
728
729 if (chan_is_physical(d40c))
730 return 0;
731
732 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
733
734 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
735 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
736
737 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
738 d40c->base->lcla_pool.alloc_map[idx] = NULL;
739 d40d->lcla_alloc--;
740 if (d40d->lcla_alloc == 0) {
741 ret = 0;
742 break;
743 }
744 }
745 }
746
747 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
748
749 return ret;
750
751}
752
753static void d40_desc_remove(struct d40_desc *d40d)
754{
755 list_del(&d40d->node);
756}
757
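/*
 * Get a descriptor: reuse an already acked one from the client list if
 * possible, otherwise allocate a fresh one from the slab cache.
 */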
758static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
759{
760 struct d40_desc *desc = NULL;
761
762 if (!list_empty(&d40c->client)) {
763 struct d40_desc *d;
764 struct d40_desc *_d;
765
766 list_for_each_entry_safe(d, _d, &d40c->client, node) {
767 if (async_tx_test_ack(&d->txd)) {
768 d40_desc_remove(d);
769 desc = d;
770 memset(desc, 0, sizeof(*desc));
771 break;
772 }
773 }
774 }
775
776 if (!desc)
777 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
778
779 if (desc)
780 INIT_LIST_HEAD(&desc->node);
781
782 return desc;
783}
784
785static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
786{
787
788 d40_pool_lli_free(d40c, d40d);
789 d40_lcla_free_all(d40c, d40d);
790 kmem_cache_free(d40c->base->desc_slab, d40d);
791}
792
793static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
794{
795 list_add_tail(&desc->node, &d40c->active);
796}
797
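/* Program the first src/dst LLI pair directly into the channel registers. */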
798static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
799{
800 struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
801 struct d40_phy_lli *lli_src = desc->lli_phy.src;
802 void __iomem *base = chan_base(chan);
803
804 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
805 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
806 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
807 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
808
809 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
810 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
811 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
812 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
813}
814
815static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
816{
817 list_add_tail(&desc->node, &d40c->done);
818}
819
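/*
 * Load the logical-channel LLIs: the first link is written to LCPA, any
 * remaining links are chained through LCLA entries.
 */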
820static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
821{
822 struct d40_lcla_pool *pool = &chan->base->lcla_pool;
823 struct d40_log_lli_bidir *lli = &desc->lli_log;
824 int lli_current = desc->lli_current;
825 int lli_len = desc->lli_len;
826 bool cyclic = desc->cyclic;
827 int curr_lcla = -EINVAL;
828 int first_lcla = 0;
829 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
830 bool linkback;

        /*
         * We may have partially running cyclic transfers, in case we didn't
         * get enough LCLA entries.
         */
836 linkback = cyclic && lli_current == 0;

        /*
         * For linkback, we need one LCLA even with only one link, because we
         * can't link back to the one in LCPA space
         */
842 if (linkback || (lli_len - lli_current > 1)) {
                /*
                 * If the channel is expected to use only soft_lli don't
                 * allocate a lcla. This is to avoid a HW issue that exists
                 * in some controller during a peripheral to memory transfer
                 * that uses linked lists.
                 */
849 if (!(chan->phy_chan->use_soft_lli &&
850 chan->dma_cfg.dir == DMA_DEV_TO_MEM))
851 curr_lcla = d40_lcla_alloc_one(chan, desc);
852
853 first_lcla = curr_lcla;
854 }

        /*
         * For linkback, we normally load the LCPA in the loop since we need
         * to link it to the second LCLA and not the first. However, if we
         * couldn't even get a first LCLA, then we have to run in LCPA and
         * reload manually.
         */
862 if (!linkback || curr_lcla == -EINVAL) {
863 unsigned int flags = 0;
864
865 if (curr_lcla == -EINVAL)
866 flags |= LLI_TERM_INT;
867
868 d40_log_lli_lcpa_write(chan->lcpa,
869 &lli->dst[lli_current],
870 &lli->src[lli_current],
871 curr_lcla,
872 flags);
873 lli_current++;
874 }
875
876 if (curr_lcla < 0)
877 goto set_current;
878
879 for (; lli_current < lli_len; lli_current++) {
880 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
881 8 * curr_lcla * 2;
882 struct d40_log_lli *lcla = pool->base + lcla_offset;
883 unsigned int flags = 0;
884 int next_lcla;
885
886 if (lli_current + 1 < lli_len)
887 next_lcla = d40_lcla_alloc_one(chan, desc);
888 else
889 next_lcla = linkback ? first_lcla : -EINVAL;
890
891 if (cyclic || next_lcla == -EINVAL)
892 flags |= LLI_TERM_INT;
893
894 if (linkback && curr_lcla == first_lcla) {
                        /* First link goes in both LCPA and LCLA */
896 d40_log_lli_lcpa_write(chan->lcpa,
897 &lli->dst[lli_current],
898 &lli->src[lli_current],
899 next_lcla, flags);
900 }

                /*
                 * One unused LCLA in the cyclic case if the very first
                 * next_lcla fails...
                 */
906 d40_log_lli_lcla_write(lcla,
907 &lli->dst[lli_current],
908 &lli->src[lli_current],
909 next_lcla, flags);

                /*
                 * Cache maintenance is not needed if lcla is
                 * mapped in esram
                 */
915 if (!use_esram_lcla) {
916 dma_sync_single_range_for_device(chan->base->dev,
917 pool->dma_addr, lcla_offset,
918 2 * sizeof(struct d40_log_lli),
919 DMA_TO_DEVICE);
920 }
921 curr_lcla = next_lcla;
922
923 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
924 lli_current++;
925 break;
926 }
927 }
928 set_current:
929 desc->lli_current = lli_current;
930}
931
932static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
933{
934 if (chan_is_physical(d40c)) {
935 d40_phy_lli_load(d40c, d40d);
936 d40d->lli_current = d40d->lli_len;
937 } else
938 d40_log_lli_to_lcxa(d40c, d40d);
939}
940
941static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
942{
943 return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
944}

/* remove desc from current queue and add it to the pending_queue */
947static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
948{
949 d40_desc_remove(desc);
950 desc->is_in_client_list = false;
951 list_add_tail(&desc->node, &d40c->pending_queue);
952}
953
954static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
955{
956 return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
957 node);
958}
959
960static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
961{
962 return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
963}
964
965static struct d40_desc *d40_first_done(struct d40_chan *d40c)
966{
967 return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
968}
969
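/* Convert a packet size setting to the burst size in elements: 1, or 2 << psize. */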
970static int d40_psize_2_burst_size(bool is_log, int psize)
971{
972 if (is_log) {
973 if (psize == STEDMA40_PSIZE_LOG_1)
974 return 1;
975 } else {
976 if (psize == STEDMA40_PSIZE_PHY_1)
977 return 1;
978 }
979
980 return 2 << psize;
981}

/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in bytes.
 *
 * Calculate the total number of dma elements required to send the entire sg list.
 */
989static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
990{
991 int dmalen;
992 u32 max_w = max(data_width1, data_width2);
993 u32 min_w = min(data_width1, data_width2);
994 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
995
996 if (seg_max > STEDMA40_MAX_SEG_SIZE)
997 seg_max -= max_w;
998
999 if (!IS_ALIGNED(size, max_w))
1000 return -EINVAL;
1001
1002 if (size <= seg_max)
1003 dmalen = 1;
1004 else {
1005 dmalen = size / seg_max;
1006 if (dmalen * seg_max < size)
1007 dmalen++;
1008 }
1009 return dmalen;
1010}
1011
1012static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
1013 u32 data_width1, u32 data_width2)
1014{
1015 struct scatterlist *sg;
1016 int i;
1017 int len = 0;
1018 int ret;
1019
1020 for_each_sg(sgl, sg, sg_len, i) {
1021 ret = d40_size_2_dmalen(sg_dma_len(sg),
1022 data_width1, data_width2);
1023 if (ret < 0)
1024 return ret;
1025 len += ret;
1026 }
1027 return len;
1028}
1029
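/*
 * Issue a command to a physical channel. For suspend requests, poll the
 * channel status until it reports STOPPED or SUSPENDED, or give up after
 * D40_SUSPEND_MAX_IT iterations.
 */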
1030static int __d40_execute_command_phy(struct d40_chan *d40c,
1031 enum d40_command command)
1032{
1033 u32 status;
1034 int i;
1035 void __iomem *active_reg;
1036 int ret = 0;
1037 unsigned long flags;
1038 u32 wmask;
1039
1040 if (command == D40_DMA_STOP) {
1041 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1042 if (ret)
1043 return ret;
1044 }
1045
1046 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1047
1048 if (d40c->phy_chan->num % 2 == 0)
1049 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1050 else
1051 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1052
1053 if (command == D40_DMA_SUSPEND_REQ) {
1054 status = (readl(active_reg) &
1055 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1056 D40_CHAN_POS(d40c->phy_chan->num);
1057
1058 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1059 goto unlock;
1060 }
1061
1062 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1063 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1064 active_reg);
1065
1066 if (command == D40_DMA_SUSPEND_REQ) {
1067
1068 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
1069 status = (readl(active_reg) &
1070 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1071 D40_CHAN_POS(d40c->phy_chan->num);
1072
1073 cpu_relax();
                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */
1078 udelay(3);
1079
1080 if (status == D40_DMA_STOP ||
1081 status == D40_DMA_SUSPENDED)
1082 break;
1083 }
1084
1085 if (i == D40_SUSPEND_MAX_IT) {
1086 chan_err(d40c,
1087 "unable to suspend the chl %d (log: %d) status %x\n",
1088 d40c->phy_chan->num, d40c->log_num,
1089 status);
1090 dump_stack();
1091 ret = -EBUSY;
1092 }
1093
1094 }
1095 unlock:
1096 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1097 return ret;
1098}
1099
1100static void d40_term_all(struct d40_chan *d40c)
1101{
1102 struct d40_desc *d40d;
1103 struct d40_desc *_d;

        /* Release completed descriptors */
1106 while ((d40d = d40_first_done(d40c))) {
1107 d40_desc_remove(d40d);
1108 d40_desc_free(d40c, d40d);
1109 }

        /* Release active descriptors */
1112 while ((d40d = d40_first_active_get(d40c))) {
1113 d40_desc_remove(d40d);
1114 d40_desc_free(d40c, d40d);
1115 }

        /* Release queued descriptors waiting for transfer */
1118 while ((d40d = d40_first_queued(d40c))) {
1119 d40_desc_remove(d40d);
1120 d40_desc_free(d40c, d40d);
1121 }

        /* Release pending descriptors */
1124 while ((d40d = d40_first_pending(d40c))) {
1125 d40_desc_remove(d40d);
1126 d40_desc_free(d40c, d40d);
1127 }

        /* Release client owned descriptors */
1130 if (!list_empty(&d40c->client))
1131 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1132 d40_desc_remove(d40d);
1133 d40_desc_free(d40c, d40d);
1134 }

        /* Release descriptors in prepare queue */
1137 if (!list_empty(&d40c->prepare_queue))
1138 list_for_each_entry_safe(d40d, _d,
1139 &d40c->prepare_queue, node) {
1140 d40_desc_remove(d40d);
1141 d40_desc_free(d40c, d40d);
1142 }
1143
1144 d40c->pending_tx = 0;
1145}
1146
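/*
 * Activate, deactivate or suspend a single event line on a channel,
 * polling and retrying where the hardware requires it.
 */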
1147static void __d40_config_set_event(struct d40_chan *d40c,
1148 enum d40_events event_type, u32 event,
1149 int reg)
1150{
1151 void __iomem *addr = chan_base(d40c) + reg;
1152 int tries;
1153 u32 status;
1154
1155 switch (event_type) {
1156
1157 case D40_DEACTIVATE_EVENTLINE:
1158
1159 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1160 | ~D40_EVENTLINE_MASK(event), addr);
1161 break;
1162
1163 case D40_SUSPEND_REQ_EVENTLINE:
1164 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1165 D40_EVENTLINE_POS(event);
1166
1167 if (status == D40_DEACTIVATE_EVENTLINE ||
1168 status == D40_SUSPEND_REQ_EVENTLINE)
1169 break;
1170
1171 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1172 | ~D40_EVENTLINE_MASK(event), addr);
1173
1174 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1175
1176 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1177 D40_EVENTLINE_POS(event);
1178
1179 cpu_relax();
1180
1181
1182
1183
1184 udelay(3);
1185
1186 if (status == D40_DEACTIVATE_EVENTLINE)
1187 break;
1188 }
1189
1190 if (tries == D40_SUSPEND_MAX_IT) {
1191 chan_err(d40c,
1192 "unable to stop the event_line chl %d (log: %d)"
1193 "status %x\n", d40c->phy_chan->num,
1194 d40c->log_num, status);
1195 }
1196 break;
1197
1198 case D40_ACTIVATE_EVENTLINE:
                /*
                 * The hw sometimes doesn't register the enable when src and
                 * dst event lines are active on the same logical channel.
                 * Retry to ensure it does. Usually only one retry is
                 * sufficient.
                 */
1204 tries = 100;
1205 while (--tries) {
1206 writel((D40_ACTIVATE_EVENTLINE <<
1207 D40_EVENTLINE_POS(event)) |
1208 ~D40_EVENTLINE_MASK(event), addr);
1209
1210 if (readl(addr) & D40_EVENTLINE_MASK(event))
1211 break;
1212 }
1213
1214 if (tries != 99)
1215 dev_dbg(chan2dev(d40c),
1216 "[%s] workaround enable S%cLNK (%d tries)\n",
1217 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1218 100 - tries);
1219
1220 WARN_ON(!tries);
1221 break;
1222
1223 case D40_ROUND_EVENTLINE:
1224 BUG();
1225 break;
1226
1227 }
1228}
1229
1230static void d40_config_set_event(struct d40_chan *d40c,
1231 enum d40_events event_type)
1232{
1233 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

        /* Enable event line connected to device (or memcpy) */
1236 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1237 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1238 __d40_config_set_event(d40c, event_type, event,
1239 D40_CHAN_REG_SSLNK);
1240
1241 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1242 __d40_config_set_event(d40c, event_type, event,
1243 D40_CHAN_REG_SDLNK);
1244}
1245
1246static u32 d40_chan_has_events(struct d40_chan *d40c)
1247{
1248 void __iomem *chanbase = chan_base(d40c);
1249 u32 val;
1250
1251 val = readl(chanbase + D40_CHAN_REG_SSLNK);
1252 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1253
1254 return val;
1255}
1256
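/*
 * Execute a command on a logical channel by (de)activating its event lines
 * on the shared physical channel; the physical channel itself is only
 * stopped when no event lines remain active.
 */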
1257static int
1258__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1259{
1260 unsigned long flags;
1261 int ret = 0;
1262 u32 active_status;
1263 void __iomem *active_reg;
1264
1265 if (d40c->phy_chan->num % 2 == 0)
1266 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1267 else
1268 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1269
1270
1271 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1272
1273 switch (command) {
1274 case D40_DMA_STOP:
1275 case D40_DMA_SUSPEND_REQ:
1276
1277 active_status = (readl(active_reg) &
1278 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1279 D40_CHAN_POS(d40c->phy_chan->num);
1280
1281 if (active_status == D40_DMA_RUN)
1282 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1283 else
1284 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1285
1286 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1287 ret = __d40_execute_command_phy(d40c, command);
1288
1289 break;
1290
1291 case D40_DMA_RUN:
1292
1293 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1294 ret = __d40_execute_command_phy(d40c, command);
1295 break;
1296
1297 case D40_DMA_SUSPENDED:
1298 BUG();
1299 break;
1300 }
1301
1302 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1303 return ret;
1304}
1305
1306static int d40_channel_execute_command(struct d40_chan *d40c,
1307 enum d40_command command)
1308{
1309 if (chan_is_logical(d40c))
1310 return __d40_execute_command_log(d40c, command);
1311 else
1312 return __d40_execute_command_phy(d40c, command);
1313}
1314
1315static u32 d40_get_prmo(struct d40_chan *d40c)
1316{
1317 static const unsigned int phy_map[] = {
1318 [STEDMA40_PCHAN_BASIC_MODE]
1319 = D40_DREG_PRMO_PCHAN_BASIC,
1320 [STEDMA40_PCHAN_MODULO_MODE]
1321 = D40_DREG_PRMO_PCHAN_MODULO,
1322 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
1323 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1324 };
1325 static const unsigned int log_map[] = {
1326 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1327 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1328 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1329 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1330 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1331 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1332 };
1333
1334 if (chan_is_physical(d40c))
1335 return phy_map[d40c->dma_cfg.mode_opt];
1336 else
1337 return log_map[d40c->dma_cfg.mode_opt];
1338}
1339
1340static void d40_config_write(struct d40_chan *d40c)
1341{
1342 u32 addr_base;
1343 u32 var;

        /* Odd addresses are even addresses + 4 */
1346 addr_base = (d40c->phy_chan->num % 2) * 4;
1347
1348 var = ((u32)(chan_is_logical(d40c)) + 1) <<
1349 D40_CHAN_POS(d40c->phy_chan->num);
1350 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

        /* Setup operational mode option register */
1353 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1354
1355 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1356
1357 if (chan_is_logical(d40c)) {
1358 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1359 & D40_SREG_ELEM_LOG_LIDX_MASK;
1360 void __iomem *chanbase = chan_base(d40c);

                /* Set default config for CFG reg */
1363 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1364 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

                /* Set LIDX for lcla */
1367 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1368 writel(lidx, chanbase + D40_CHAN_REG_SDELT);

                /* Clear LNK which will be used by d40_chan_has_events() */
1371 writel(0, chanbase + D40_CHAN_REG_SSLNK);
1372 writel(0, chanbase + D40_CHAN_REG_SDLNK);
1373 }
1374}
1375
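/* Bytes left in the current job: remaining element count times data width. */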
1376static u32 d40_residue(struct d40_chan *d40c)
1377{
1378 u32 num_elt;
1379
1380 if (chan_is_logical(d40c))
1381 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1382 >> D40_MEM_LCSP2_ECNT_POS;
1383 else {
1384 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1385 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1386 >> D40_SREG_ELEM_PHY_ECNT_POS;
1387 }
1388
1389 return num_elt * d40c->dma_cfg.dst_info.data_width;
1390}
1391
1392static bool d40_tx_is_linked(struct d40_chan *d40c)
1393{
1394 bool is_link;
1395
1396 if (chan_is_logical(d40c))
1397 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1398 else
1399 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1400 & D40_SREG_LNK_PHYS_LNK_MASK;
1401
1402 return is_link;
1403}
1404
1405static int d40_pause(struct dma_chan *chan)
1406{
1407 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1408 int res = 0;
1409 unsigned long flags;
1410
1411 if (d40c->phy_chan == NULL) {
1412 chan_err(d40c, "Channel is not allocated!\n");
1413 return -EINVAL;
1414 }
1415
1416 if (!d40c->busy)
1417 return 0;
1418
1419 spin_lock_irqsave(&d40c->lock, flags);
1420 pm_runtime_get_sync(d40c->base->dev);
1421
1422 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1423
1424 pm_runtime_mark_last_busy(d40c->base->dev);
1425 pm_runtime_put_autosuspend(d40c->base->dev);
1426 spin_unlock_irqrestore(&d40c->lock, flags);
1427 return res;
1428}
1429
1430static int d40_resume(struct dma_chan *chan)
1431{
1432 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1433 int res = 0;
1434 unsigned long flags;
1435
1436 if (d40c->phy_chan == NULL) {
1437 chan_err(d40c, "Channel is not allocated!\n");
1438 return -EINVAL;
1439 }
1440
1441 if (!d40c->busy)
1442 return 0;
1443
1444 spin_lock_irqsave(&d40c->lock, flags);
1445 pm_runtime_get_sync(d40c->base->dev);

        /* If bytes left to transfer or linked tx resume job */
1448 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1449 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1450
1451 pm_runtime_mark_last_busy(d40c->base->dev);
1452 pm_runtime_put_autosuspend(d40c->base->dev);
1453 spin_unlock_irqrestore(&d40c->lock, flags);
1454 return res;
1455}
1456
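/* dmaengine tx_submit hook: assign a cookie and park the job on the pending queue. */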
1457static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1458{
1459 struct d40_chan *d40c = container_of(tx->chan,
1460 struct d40_chan,
1461 chan);
1462 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1463 unsigned long flags;
1464 dma_cookie_t cookie;
1465
1466 spin_lock_irqsave(&d40c->lock, flags);
1467 cookie = dma_cookie_assign(tx);
1468 d40_desc_queue(d40c, d40d);
1469 spin_unlock_irqrestore(&d40c->lock, flags);
1470
1471 return cookie;
1472}
1473
1474static int d40_start(struct d40_chan *d40c)
1475{
1476 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1477}
1478
1479static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1480{
1481 struct d40_desc *d40d;
1482 int err;

        /* Start queued jobs, if any */
1485 d40d = d40_first_queued(d40c);
1486
1487 if (d40d != NULL) {
1488 if (!d40c->busy) {
1489 d40c->busy = true;
1490 pm_runtime_get_sync(d40c->base->dev);
1491 }

                /* Remove from queue */
                d40_desc_remove(d40d);

                /* Add to active queue */
                d40_desc_submit(d40c, d40d);

                /* Initiate DMA job */
                d40_desc_load(d40c, d40d);

                /* Start dma job */
                err = d40_start(d40c);
1504
1505 if (err)
1506 return NULL;
1507 }
1508
1509 return d40d;
1510}

/* called from interrupt context */
1513static void dma_tc_handle(struct d40_chan *d40c)
1514{
1515 struct d40_desc *d40d;

        /* Get first active entry from list */
1518 d40d = d40_first_active_get(d40c);
1519
1520 if (d40d == NULL)
1521 return;
1522
1523 if (d40d->cyclic) {
                /*
                 * If this was a partially loaded list, we need to reload it
                 * only when the loaded part has completed. We also need to
                 * make sure the descriptor is still valid (i.e. it has not
                 * been removed).
                 */
1530 if (d40d->lli_current < d40d->lli_len
1531 && !d40_tx_is_linked(d40c)
1532 && !d40_residue(d40c)) {
1533 d40_lcla_free_all(d40c, d40d);
1534 d40_desc_load(d40c, d40d);
1535 (void) d40_start(d40c);
1536
1537 if (d40d->lli_current == d40d->lli_len)
1538 d40d->lli_current = 0;
1539 }
1540 } else {
1541 d40_lcla_free_all(d40c, d40d);
1542
1543 if (d40d->lli_current < d40d->lli_len) {
1544 d40_desc_load(d40c, d40d);
                        /* Start dma job */
1546 (void) d40_start(d40c);
1547 return;
1548 }
1549
1550 if (d40_queue_start(d40c) == NULL) {
1551 d40c->busy = false;
1552
1553 pm_runtime_mark_last_busy(d40c->base->dev);
1554 pm_runtime_put_autosuspend(d40c->base->dev);
1555 }
1556
1557 d40_desc_remove(d40d);
1558 d40_desc_done(d40c, d40d);
1559 }
1560
1561 d40c->pending_tx++;
1562 tasklet_schedule(&d40c->tasklet);
1563
1564}
1565
1566static void dma_tasklet(unsigned long data)
1567{
1568 struct d40_chan *d40c = (struct d40_chan *) data;
1569 struct d40_desc *d40d;
1570 unsigned long flags;
1571 bool callback_active;
1572 struct dmaengine_desc_callback cb;
1573
1574 spin_lock_irqsave(&d40c->lock, flags);

        /* Get first entry from the done list */
1577 d40d = d40_first_done(d40c);
1578 if (d40d == NULL) {
                /* Check if we have reached here for cyclic job */
1580 d40d = d40_first_active_get(d40c);
1581 if (d40d == NULL || !d40d->cyclic)
1582 goto check_pending_tx;
1583 }
1584
1585 if (!d40d->cyclic)
1586 dma_cookie_complete(&d40d->txd);

        /*
         * If terminating a channel pending_tx is set to zero.
         * This prevents any finished active jobs to return to the client.
         */
1592 if (d40c->pending_tx == 0) {
1593 spin_unlock_irqrestore(&d40c->lock, flags);
1594 return;
1595 }

        /* Callback to client */
1598 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1599 dmaengine_desc_get_callback(&d40d->txd, &cb);
1600
1601 if (!d40d->cyclic) {
1602 if (async_tx_test_ack(&d40d->txd)) {
1603 d40_desc_remove(d40d);
1604 d40_desc_free(d40c, d40d);
1605 } else if (!d40d->is_in_client_list) {
1606 d40_desc_remove(d40d);
1607 d40_lcla_free_all(d40c, d40d);
1608 list_add_tail(&d40d->node, &d40c->client);
1609 d40d->is_in_client_list = true;
1610 }
1611 }
1612
1613 d40c->pending_tx--;
1614
1615 if (d40c->pending_tx)
1616 tasklet_schedule(&d40c->tasklet);
1617
1618 spin_unlock_irqrestore(&d40c->lock, flags);
1619
1620 if (callback_active)
1621 dmaengine_desc_callback_invoke(&cb, NULL);
1622
1623 return;
1624 check_pending_tx:
        /* Rescue manoeuvre if receiving double interrupts */
1626 if (d40c->pending_tx > 0)
1627 d40c->pending_tx--;
1628 spin_unlock_irqrestore(&d40c->lock, flags);
1629}
1630
1631static irqreturn_t d40_handle_interrupt(int irq, void *data)
1632{
1633 int i;
1634 u32 idx;
1635 u32 row;
1636 long chan = -1;
1637 struct d40_chan *d40c;
1638 unsigned long flags;
1639 struct d40_base *base = data;
1640 u32 regs[base->gen_dmac.il_size];
1641 struct d40_interrupt_lookup *il = base->gen_dmac.il;
1642 u32 il_size = base->gen_dmac.il_size;
1643
1644 spin_lock_irqsave(&base->interrupt_lock, flags);

        /* Read interrupt status of both logical and physical channels */
1647 for (i = 0; i < il_size; i++)
1648 regs[i] = readl(base->virtbase + il[i].src);
1649
1650 for (;;) {
1651
1652 chan = find_next_bit((unsigned long *)regs,
1653 BITS_PER_LONG * il_size, chan + 1);

                /* No more set bits found? */
1656 if (chan == BITS_PER_LONG * il_size)
1657 break;
1658
1659 row = chan / BITS_PER_LONG;
1660 idx = chan & (BITS_PER_LONG - 1);
1661
1662 if (il[row].offset == D40_PHY_CHAN)
1663 d40c = base->lookup_phy_chans[idx];
1664 else
1665 d40c = base->lookup_log_chans[il[row].offset + idx];
1666
1667 if (!d40c) {
                        /*
                         * No error because this can happen if something else
                         * in the system is using the channel.
                         */
1672 continue;
1673 }

                /* ACK interrupt */
1676 writel(BIT(idx), base->virtbase + il[row].clr);
1677
1678 spin_lock(&d40c->lock);
1679
1680 if (!il[row].is_error)
1681 dma_tc_handle(d40c);
1682 else
1683 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1684 chan, il[row].offset, idx);
1685
1686 spin_unlock(&d40c->lock);
1687 }
1688
1689 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1690
1691 return IRQ_HANDLED;
1692}
1693
1694static int d40_validate_conf(struct d40_chan *d40c,
1695 struct stedma40_chan_cfg *conf)
1696{
1697 int res = 0;
1698 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1699
1700 if (!conf->dir) {
1701 chan_err(d40c, "Invalid direction.\n");
1702 res = -EINVAL;
1703 }
1704
1705 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1706 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1707 (conf->dev_type < 0)) {
1708 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1709 res = -EINVAL;
1710 }
1711
1712 if (conf->dir == DMA_DEV_TO_DEV) {
                /*
                 * DMAC HW supports it. Will be added to this driver,
                 * in case any dma client requires it.
                 */
1717 chan_err(d40c, "periph to periph not supported\n");
1718 res = -EINVAL;
1719 }
1720
1721 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1722 conf->src_info.data_width !=
1723 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1724 conf->dst_info.data_width) {
                /*
                 * The DMAC hardware only supports
                 * src (burst x width) == dst (burst x width)
                 */
1730 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1731 res = -EINVAL;
1732 }
1733
1734 return res;
1735}
1736
1737static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1738 bool is_src, int log_event_line, bool is_log,
1739 bool *first_user)
1740{
1741 unsigned long flags;
1742 spin_lock_irqsave(&phy->lock, flags);
1743
1744 *first_user = ((phy->allocated_src | phy->allocated_dst)
1745 == D40_ALLOC_FREE);
1746
1747 if (!is_log) {
                /* Physical interrupts are masked per physical full channel */
1749 if (phy->allocated_src == D40_ALLOC_FREE &&
1750 phy->allocated_dst == D40_ALLOC_FREE) {
1751 phy->allocated_dst = D40_ALLOC_PHY;
1752 phy->allocated_src = D40_ALLOC_PHY;
1753 goto found_unlock;
1754 } else
1755 goto not_found_unlock;
1756 }

        /* Logical channel */
1759 if (is_src) {
1760 if (phy->allocated_src == D40_ALLOC_PHY)
1761 goto not_found_unlock;
1762
1763 if (phy->allocated_src == D40_ALLOC_FREE)
1764 phy->allocated_src = D40_ALLOC_LOG_FREE;
1765
1766 if (!(phy->allocated_src & BIT(log_event_line))) {
1767 phy->allocated_src |= BIT(log_event_line);
1768 goto found_unlock;
1769 } else
1770 goto not_found_unlock;
1771 } else {
1772 if (phy->allocated_dst == D40_ALLOC_PHY)
1773 goto not_found_unlock;
1774
1775 if (phy->allocated_dst == D40_ALLOC_FREE)
1776 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1777
1778 if (!(phy->allocated_dst & BIT(log_event_line))) {
1779 phy->allocated_dst |= BIT(log_event_line);
1780 goto found_unlock;
1781 }
1782 }
1783 not_found_unlock:
1784 spin_unlock_irqrestore(&phy->lock, flags);
1785 return false;
1786 found_unlock:
1787 spin_unlock_irqrestore(&phy->lock, flags);
1788 return true;
1789}
1790
1791static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1792 int log_event_line)
1793{
1794 unsigned long flags;
1795 bool is_free = false;
1796
1797 spin_lock_irqsave(&phy->lock, flags);
1798 if (!log_event_line) {
1799 phy->allocated_dst = D40_ALLOC_FREE;
1800 phy->allocated_src = D40_ALLOC_FREE;
1801 is_free = true;
1802 goto unlock;
1803 }

        /* Logical channel */
1806 if (is_src) {
1807 phy->allocated_src &= ~BIT(log_event_line);
1808 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1809 phy->allocated_src = D40_ALLOC_FREE;
1810 } else {
1811 phy->allocated_dst &= ~BIT(log_event_line);
1812 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1813 phy->allocated_dst = D40_ALLOC_FREE;
1814 }
1815
1816 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1817 D40_ALLOC_FREE);
1818 unlock:
1819 spin_unlock_irqrestore(&phy->lock, flags);
1820
1821 return is_free;
1822}
1823
1824static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1825{
1826 int dev_type = d40c->dma_cfg.dev_type;
1827 int event_group;
1828 int event_line;
1829 struct d40_phy_res *phys;
1830 int i;
1831 int j;
1832 int log_num;
1833 int num_phy_chans;
1834 bool is_src;
1835 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1836
1837 phys = d40c->base->phy_res;
1838 num_phy_chans = d40c->base->num_phy_chans;
1839
1840 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1841 log_num = 2 * dev_type;
1842 is_src = true;
1843 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1844 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
                /* dst event lines are used for logical memcpy */
1846 log_num = 2 * dev_type + 1;
1847 is_src = false;
1848 } else
1849 return -EINVAL;
1850
1851 event_group = D40_TYPE_TO_GROUP(dev_type);
1852 event_line = D40_TYPE_TO_EVENT(dev_type);
1853
1854 if (!is_log) {
1855 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
                        /* Find physical half channel */
1857 if (d40c->dma_cfg.use_fixed_channel) {
1858 i = d40c->dma_cfg.phy_channel;
1859 if (d40_alloc_mask_set(&phys[i], is_src,
1860 0, is_log,
1861 first_phy_user))
1862 goto found_phy;
1863 } else {
1864 for (i = 0; i < num_phy_chans; i++) {
1865 if (d40_alloc_mask_set(&phys[i], is_src,
1866 0, is_log,
1867 first_phy_user))
1868 goto found_phy;
1869 }
1870 }
1871 } else
1872 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1873 int phy_num = j + event_group * 2;
1874 for (i = phy_num; i < phy_num + 2; i++) {
1875 if (d40_alloc_mask_set(&phys[i],
1876 is_src,
1877 0,
1878 is_log,
1879 first_phy_user))
1880 goto found_phy;
1881 }
1882 }
1883 return -EINVAL;
1884found_phy:
1885 d40c->phy_chan = &phys[i];
1886 d40c->log_num = D40_PHY_CHAN;
1887 goto out;
1888 }
1889 if (dev_type == -1)
1890 return -EINVAL;

        /* Find logical channel */
1893 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1894 int phy_num = j + event_group * 2;
1895
1896 if (d40c->dma_cfg.use_fixed_channel) {
1897 i = d40c->dma_cfg.phy_channel;
1898
1899 if ((i != phy_num) && (i != phy_num + 1)) {
1900 dev_err(chan2dev(d40c),
1901 "invalid fixed phy channel %d\n", i);
1902 return -EINVAL;
1903 }
1904
1905 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1906 is_log, first_phy_user))
1907 goto found_log;
1908
1909 dev_err(chan2dev(d40c),
1910 "could not allocate fixed phy channel %d\n", i);
1911 return -EINVAL;
1912 }
1913

                /*
                 * Spread logical channels across all available physical
                 * rather than pack every logical channel at the first
                 * available phy channels.
                 */
1919 if (is_src) {
1920 for (i = phy_num; i < phy_num + 2; i++) {
1921 if (d40_alloc_mask_set(&phys[i], is_src,
1922 event_line, is_log,
1923 first_phy_user))
1924 goto found_log;
1925 }
1926 } else {
1927 for (i = phy_num + 1; i >= phy_num; i--) {
1928 if (d40_alloc_mask_set(&phys[i], is_src,
1929 event_line, is_log,
1930 first_phy_user))
1931 goto found_log;
1932 }
1933 }
1934 }
1935 return -EINVAL;
1936
1937found_log:
1938 d40c->phy_chan = &phys[i];
1939 d40c->log_num = log_num;
1940out:
1941
1942 if (is_log)
1943 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1944 else
1945 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1946
1947 return 0;
1948
1949}
1950
1951static int d40_config_memcpy(struct d40_chan *d40c)
1952{
1953 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1954
1955 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1956 d40c->dma_cfg = dma40_memcpy_conf_log;
1957 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1958
1959 d40_log_cfg(&d40c->dma_cfg,
1960 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1961
1962 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1963 dma_has_cap(DMA_SLAVE, cap)) {
1964 d40c->dma_cfg = dma40_memcpy_conf_phy;

                /* Generate interrupt at end of transfer or relink. */
                d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);

                /* Generate interrupt on error. */
                d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
                d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1972
1973 } else {
1974 chan_err(d40c, "No memcpy\n");
1975 return -EINVAL;
1976 }
1977
1978 return 0;
1979}
1980
1981static int d40_free_dma(struct d40_chan *d40c)
1982{
1983
1984 int res = 0;
1985 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1986 struct d40_phy_res *phy = d40c->phy_chan;
1987 bool is_src;

        /* Terminate all queued and active transfers */
1990 d40_term_all(d40c);
1991
1992 if (phy == NULL) {
1993 chan_err(d40c, "phy == null\n");
1994 return -EINVAL;
1995 }
1996
1997 if (phy->allocated_src == D40_ALLOC_FREE &&
1998 phy->allocated_dst == D40_ALLOC_FREE) {
1999 chan_err(d40c, "channel already free\n");
2000 return -EINVAL;
2001 }
2002
2003 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2004 d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2005 is_src = false;
2006 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2007 is_src = true;
2008 else {
2009 chan_err(d40c, "Unknown direction\n");
2010 return -EINVAL;
2011 }
2012
2013 pm_runtime_get_sync(d40c->base->dev);
2014 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2015 if (res) {
2016 chan_err(d40c, "stop failed\n");
2017 goto mark_last_busy;
2018 }
2019
2020 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2021
2022 if (chan_is_logical(d40c))
2023 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2024 else
2025 d40c->base->lookup_phy_chans[phy->num] = NULL;
2026
2027 if (d40c->busy) {
2028 pm_runtime_mark_last_busy(d40c->base->dev);
2029 pm_runtime_put_autosuspend(d40c->base->dev);
2030 }
2031
2032 d40c->busy = false;
2033 d40c->phy_chan = NULL;
2034 d40c->configured = false;
2035 mark_last_busy:
2036 pm_runtime_mark_last_busy(d40c->base->dev);
2037 pm_runtime_put_autosuspend(d40c->base->dev);
2038 return res;
2039}
2040
2041static bool d40_is_paused(struct d40_chan *d40c)
2042{
2043 void __iomem *chanbase = chan_base(d40c);
2044 bool is_paused = false;
2045 unsigned long flags;
2046 void __iomem *active_reg;
2047 u32 status;
2048 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2049
2050 spin_lock_irqsave(&d40c->lock, flags);
2051
2052 if (chan_is_physical(d40c)) {
2053 if (d40c->phy_chan->num % 2 == 0)
2054 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2055 else
2056 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2057
2058 status = (readl(active_reg) &
2059 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2060 D40_CHAN_POS(d40c->phy_chan->num);
2061 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2062 is_paused = true;
2063 goto unlock;
2064 }
2065
2066 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2067 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2068 status = readl(chanbase + D40_CHAN_REG_SDLNK);
2069 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2070 status = readl(chanbase + D40_CHAN_REG_SSLNK);
2071 } else {
2072 chan_err(d40c, "Unknown direction\n");
2073 goto unlock;
2074 }
2075
2076 status = (status & D40_EVENTLINE_MASK(event)) >>
2077 D40_EVENTLINE_POS(event);
2078
2079 if (status != D40_DMA_RUN)
2080 is_paused = true;
2081 unlock:
2082 spin_unlock_irqrestore(&d40c->lock, flags);
2083 return is_paused;
2084
2085}
2086
2087static u32 stedma40_residue(struct dma_chan *chan)
2088{
2089 struct d40_chan *d40c =
2090 container_of(chan, struct d40_chan, chan);
2091 u32 bytes_left;
2092 unsigned long flags;
2093
2094 spin_lock_irqsave(&d40c->lock, flags);
2095 bytes_left = d40_residue(d40c);
2096 spin_unlock_irqrestore(&d40c->lock, flags);
2097
2098 return bytes_left;
2099}
2100
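/* Build the logical-channel src and dst LLI lists for a scatterlist job. */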
2101static int
2102d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2103 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2104 unsigned int sg_len, dma_addr_t src_dev_addr,
2105 dma_addr_t dst_dev_addr)
2106{
2107 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2108 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2109 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2110 int ret;
2111
2112 ret = d40_log_sg_to_lli(sg_src, sg_len,
2113 src_dev_addr,
2114 desc->lli_log.src,
2115 chan->log_def.lcsp1,
2116 src_info->data_width,
2117 dst_info->data_width);
2118
2119 ret = d40_log_sg_to_lli(sg_dst, sg_len,
2120 dst_dev_addr,
2121 desc->lli_log.dst,
2122 chan->log_def.lcsp3,
2123 dst_info->data_width,
2124 src_info->data_width);
2125
2126 return ret < 0 ? ret : 0;
2127}
2128
2129static int
2130d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2131 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2132 unsigned int sg_len, dma_addr_t src_dev_addr,
2133 dma_addr_t dst_dev_addr)
2134{
2135 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2136 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2137 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2138 unsigned long flags = 0;
2139 int ret;
2140
2141 if (desc->cyclic)
2142 flags |= LLI_CYCLIC | LLI_TERM_INT;
2143
2144 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2145 desc->lli_phy.src,
2146 virt_to_phys(desc->lli_phy.src),
2147 chan->src_def_cfg,
2148 src_info, dst_info, flags);
2149
2150 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2151 desc->lli_phy.dst,
2152 virt_to_phys(desc->lli_phy.dst),
2153 chan->dst_def_cfg,
2154 dst_info, src_info, flags);
2155
2156 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2157 desc->lli_pool.size, DMA_TO_DEVICE);
2158
2159 return ret < 0 ? ret : 0;
2160}
2161
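/*
 * Allocate and initialise a descriptor for a scatterlist transfer: compute
 * the number of LLIs, allocate the LLI pool and set up the dmaengine txd.
 */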
2162static struct d40_desc *
2163d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2164 unsigned int sg_len, unsigned long dma_flags)
2165{
2166 struct stedma40_chan_cfg *cfg;
2167 struct d40_desc *desc;
2168 int ret;
2169
2170 desc = d40_desc_get(chan);
2171 if (!desc)
2172 return NULL;
2173
2174 cfg = &chan->dma_cfg;
2175 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2176 cfg->dst_info.data_width);
2177 if (desc->lli_len < 0) {
2178 chan_err(chan, "Unaligned size\n");
2179 goto free_desc;
2180 }
2181
2182 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2183 if (ret < 0) {
2184 chan_err(chan, "Could not allocate lli\n");
2185 goto free_desc;
2186 }
2187
2188 desc->lli_current = 0;
2189 desc->txd.flags = dma_flags;
2190 desc->txd.tx_submit = d40_tx_submit;
2191
2192 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2193
2194 return desc;
2195 free_desc:
2196 d40_desc_free(chan, desc);
2197 return NULL;
2198}
2199
2200static struct dma_async_tx_descriptor *
2201d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2202 struct scatterlist *sg_dst, unsigned int sg_len,
2203 enum dma_transfer_direction direction, unsigned long dma_flags)
2204{
2205 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2206 dma_addr_t src_dev_addr;
2207 dma_addr_t dst_dev_addr;
2208 struct d40_desc *desc;
2209 unsigned long flags;
2210 int ret;
2211
2212 if (!chan->phy_chan) {
2213 chan_err(chan, "Cannot prepare unallocated channel\n");
2214 return NULL;
2215 }
2216
2217 spin_lock_irqsave(&chan->lock, flags);
2218
2219 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2220 if (desc == NULL)
2221 goto unlock;
2222
2223 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2224 desc->cyclic = true;
2225
2226 src_dev_addr = 0;
2227 dst_dev_addr = 0;
2228 if (direction == DMA_DEV_TO_MEM)
2229 src_dev_addr = chan->runtime_addr;
2230 else if (direction == DMA_MEM_TO_DEV)
2231 dst_dev_addr = chan->runtime_addr;
2232
2233 if (chan_is_logical(chan))
2234 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2235 sg_len, src_dev_addr, dst_dev_addr);
2236 else
2237 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2238 sg_len, src_dev_addr, dst_dev_addr);
2239
2240 if (ret) {
2241 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2242 chan_is_logical(chan) ? "log" : "phy", ret);
2243 goto free_desc;
2244 }

        /*
         * add descriptor to the prepare queue in order to be able
         * to free them later in terminate_all
         */
2250 list_add_tail(&desc->node, &chan->prepare_queue);
2251
2252 spin_unlock_irqrestore(&chan->lock, flags);
2253
2254 return &desc->txd;
2255 free_desc:
2256 d40_desc_free(chan, desc);
2257 unlock:
2258 spin_unlock_irqrestore(&chan->lock, flags);
2259 return NULL;
2260}
2261
2262bool stedma40_filter(struct dma_chan *chan, void *data)
2263{
2264 struct stedma40_chan_cfg *info = data;
2265 struct d40_chan *d40c =
2266 container_of(chan, struct d40_chan, chan);
2267 int err;
2268
2269 if (data) {
2270 err = d40_validate_conf(d40c, info);
2271 if (!err)
2272 d40c->dma_cfg = *info;
2273 } else
2274 err = d40_config_memcpy(d40c);
2275
2276 if (!err)
2277 d40c->configured = true;
2278
2279 return err == 0;
2280}
2281EXPORT_SYMBOL(stedma40_filter);
2282
2283static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2284{
2285 bool realtime = d40c->dma_cfg.realtime;
2286 bool highprio = d40c->dma_cfg.high_priority;
2287 u32 rtreg;
2288 u32 event = D40_TYPE_TO_EVENT(dev_type);
2289 u32 group = D40_TYPE_TO_GROUP(dev_type);
2290 u32 bit = BIT(event);
2291 u32 prioreg;
2292 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2293
2294 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;

        /*
         * Due to a hardware bug, in some cases a logical channel triggered by
         * a high priority destination event line can generate extra packet
         * transactions.
         *
         * The workaround is to not set the high priority level for the
         * destination event lines that trigger logical channels.
         */
2303 if (!src && chan_is_logical(d40c))
2304 highprio = false;
2305
2306 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;

        /* Destination event lines are stored in the upper halfword */
2309 if (!src)
2310 bit <<= 16;
2311
2312 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2313 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2314}
2315
2316static void d40_set_prio_realtime(struct d40_chan *d40c)
2317{
2318 if (d40c->base->rev < 3)
2319 return;
2320
2321 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2322 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2323 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2324
2325 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2326 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2327 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2328}
2329
2330#define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
2331#define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
2332#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2333#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2334#define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1)
2335
2336static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2337 struct of_dma *ofdma)
2338{
2339 struct stedma40_chan_cfg cfg;
2340 dma_cap_mask_t cap;
2341 u32 flags;
2342
2343 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2344
2345 dma_cap_zero(cap);
2346 dma_cap_set(DMA_SLAVE, cap);
2347
2348 cfg.dev_type = dma_spec->args[0];
2349 flags = dma_spec->args[2];
2350
2351 switch (D40_DT_FLAGS_MODE(flags)) {
2352 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2353 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2354 }
2355
2356 switch (D40_DT_FLAGS_DIR(flags)) {
2357 case 0:
2358 cfg.dir = DMA_MEM_TO_DEV;
2359 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2360 break;
2361 case 1:
2362 cfg.dir = DMA_DEV_TO_MEM;
2363 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2364 break;
2365 }
2366
2367 if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2368 cfg.phy_channel = dma_spec->args[1];
2369 cfg.use_fixed_channel = true;
2370 }
2371
2372 if (D40_DT_FLAGS_HIGH_PRIO(flags))
2373 cfg.high_priority = true;
2374
2375 return dma_request_channel(cap, stedma40_filter, &cfg);
2376}

/* DMA ENGINE functions */
2379static int d40_alloc_chan_resources(struct dma_chan *chan)
2380{
2381 int err;
2382 unsigned long flags;
2383 struct d40_chan *d40c =
2384 container_of(chan, struct d40_chan, chan);
2385 bool is_free_phy;
2386 spin_lock_irqsave(&d40c->lock, flags);
2387
2388 dma_cookie_init(chan);

        /* If no dma configuration is set use default configuration (memcpy) */
2391 if (!d40c->configured) {
2392 err = d40_config_memcpy(d40c);
2393 if (err) {
2394 chan_err(d40c, "Failed to configure memcpy channel\n");
2395 goto mark_last_busy;
2396 }
2397 }
2398
2399 err = d40_allocate_channel(d40c, &is_free_phy);
2400 if (err) {
2401 chan_err(d40c, "Failed to allocate channel\n");
2402 d40c->configured = false;
2403 goto mark_last_busy;
2404 }
2405
2406 pm_runtime_get_sync(d40c->base->dev);
2407
2408 d40_set_prio_realtime(d40c);
2409
2410 if (chan_is_logical(d40c)) {
2411 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2412 d40c->lcpa = d40c->base->lcpa_base +
2413 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2414 else
2415 d40c->lcpa = d40c->base->lcpa_base +
2416 d40c->dma_cfg.dev_type *
2417 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;

                /* Unmask the Global Interrupt Mask. */
2420 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2421 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2422 }
2423
2424 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2425 chan_is_logical(d40c) ? "logical" : "physical",
2426 d40c->phy_chan->num,
2427 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2428
	/*
	 * Only write the channel configuration when a free physical channel
	 * was just allocated; an already configured channel keeps the setup
	 * it has.
	 */
2435 if (is_free_phy)
2436 d40_config_write(d40c);
2437 mark_last_busy:
2438 pm_runtime_mark_last_busy(d40c->base->dev);
2439 pm_runtime_put_autosuspend(d40c->base->dev);
2440 spin_unlock_irqrestore(&d40c->lock, flags);
2441 return err;
2442}
2443
2444static void d40_free_chan_resources(struct dma_chan *chan)
2445{
2446 struct d40_chan *d40c =
2447 container_of(chan, struct d40_chan, chan);
2448 int err;
2449 unsigned long flags;
2450
2451 if (d40c->phy_chan == NULL) {
2452 chan_err(d40c, "Cannot free unallocated channel\n");
2453 return;
2454 }
2455
2456 spin_lock_irqsave(&d40c->lock, flags);
2457
2458 err = d40_free_dma(d40c);
2459
2460 if (err)
2461 chan_err(d40c, "Failed to free channel\n");
2462 spin_unlock_irqrestore(&d40c->lock, flags);
2463}
2464
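/*
 * A memcpy request is wrapped into two single-entry scatterlists so that
 * the common d40_prep_sg() path can be reused for it.
 */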
2465static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2466 dma_addr_t dst,
2467 dma_addr_t src,
2468 size_t size,
2469 unsigned long dma_flags)
2470{
2471 struct scatterlist dst_sg;
2472 struct scatterlist src_sg;
2473
2474 sg_init_table(&dst_sg, 1);
2475 sg_init_table(&src_sg, 1);
2476
2477 sg_dma_address(&dst_sg) = dst;
2478 sg_dma_address(&src_sg) = src;
2479
2480 sg_dma_len(&dst_sg) = size;
2481 sg_dma_len(&src_sg) = size;
2482
2483 return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
2484 DMA_MEM_TO_MEM, dma_flags);
2485}
2486
2487static struct dma_async_tx_descriptor *
2488d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2489 unsigned int sg_len, enum dma_transfer_direction direction,
2490 unsigned long dma_flags, void *context)
2491{
2492 if (!is_slave_direction(direction))
2493 return NULL;
2494
2495 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2496}
2497
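/*
 * A cyclic transfer is built as a scatterlist with one entry per period,
 * chained back onto itself with sg_chain() so that walking the list wraps
 * around. The temporary scatterlist can be freed once the descriptor has
 * been prepared.
 */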
2498static struct dma_async_tx_descriptor *
2499dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2500 size_t buf_len, size_t period_len,
2501 enum dma_transfer_direction direction, unsigned long flags)
2502{
2503 unsigned int periods = buf_len / period_len;
2504 struct dma_async_tx_descriptor *txd;
2505 struct scatterlist *sg;
2506 int i;
2507
2508 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2509 if (!sg)
2510 return NULL;
2511
2512 for (i = 0; i < periods; i++) {
2513 sg_dma_address(&sg[i]) = dma_addr;
2514 sg_dma_len(&sg[i]) = period_len;
2515 dma_addr += period_len;
2516 }
2517
2518 sg_chain(sg, periods + 1, sg);
2519
2520 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2521 DMA_PREP_INTERRUPT);
2522
2523 kfree(sg);
2524
2525 return txd;
2526}
2527
2528static enum dma_status d40_tx_status(struct dma_chan *chan,
2529 dma_cookie_t cookie,
2530 struct dma_tx_state *txstate)
2531{
2532 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2533 enum dma_status ret;
2534
2535 if (d40c->phy_chan == NULL) {
2536 chan_err(d40c, "Cannot read status of unallocated channel\n");
2537 return -EINVAL;
2538 }
2539
2540 ret = dma_cookie_status(chan, cookie, txstate);
2541 if (ret != DMA_COMPLETE && txstate)
2542 dma_set_residue(txstate, stedma40_residue(chan));
2543
2544 if (d40_is_paused(d40c))
2545 ret = DMA_PAUSED;
2546
2547 return ret;
2548}
2549
2550static void d40_issue_pending(struct dma_chan *chan)
2551{
2552 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2553 unsigned long flags;
2554
2555 if (d40c->phy_chan == NULL) {
2556 chan_err(d40c, "Channel is not allocated!\n");
2557 return;
2558 }
2559
2560 spin_lock_irqsave(&d40c->lock, flags);
2561
2562 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2563
	/* A busy channel is already working through its queue */
2565 if (!d40c->busy)
2566 (void) d40_queue_start(d40c);
2567
2568 spin_unlock_irqrestore(&d40c->lock, flags);
2569}
2570
2571static int d40_terminate_all(struct dma_chan *chan)
2572{
2573 unsigned long flags;
2574 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2575 int ret;
2576
2577 if (d40c->phy_chan == NULL) {
2578 chan_err(d40c, "Channel is not allocated!\n");
2579 return -EINVAL;
2580 }
2581
2582 spin_lock_irqsave(&d40c->lock, flags);
2583
2584 pm_runtime_get_sync(d40c->base->dev);
2585 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2586 if (ret)
2587 chan_err(d40c, "Failed to stop channel\n");
2588
2589 d40_term_all(d40c);
2590 pm_runtime_mark_last_busy(d40c->base->dev);
2591 pm_runtime_put_autosuspend(d40c->base->dev);
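	/*
	 * A busy channel holds an extra runtime PM reference taken when it
	 * was started; release that one as well before clearing busy.
	 */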
2592 if (d40c->busy) {
2593 pm_runtime_mark_last_busy(d40c->base->dev);
2594 pm_runtime_put_autosuspend(d40c->base->dev);
2595 }
2596 d40c->busy = false;
2597
2598 spin_unlock_irqrestore(&d40c->lock, flags);
2599 return 0;
2600}
2601
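/*
 * Map a requested maxburst to the closest packet size the hardware supports,
 * rounding down: for example a maxburst of 12 selects the 8-element packet
 * size, while anything below 4 falls back to single-element transfers.
 */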
2602static int
2603dma40_config_to_halfchannel(struct d40_chan *d40c,
2604 struct stedma40_half_channel_info *info,
2605 u32 maxburst)
2606{
2607 int psize;
2608
2609 if (chan_is_logical(d40c)) {
2610 if (maxburst >= 16)
2611 psize = STEDMA40_PSIZE_LOG_16;
2612 else if (maxburst >= 8)
2613 psize = STEDMA40_PSIZE_LOG_8;
2614 else if (maxburst >= 4)
2615 psize = STEDMA40_PSIZE_LOG_4;
2616 else
2617 psize = STEDMA40_PSIZE_LOG_1;
2618 } else {
2619 if (maxburst >= 16)
2620 psize = STEDMA40_PSIZE_PHY_16;
2621 else if (maxburst >= 8)
2622 psize = STEDMA40_PSIZE_PHY_8;
2623 else if (maxburst >= 4)
2624 psize = STEDMA40_PSIZE_PHY_4;
2625 else
2626 psize = STEDMA40_PSIZE_PHY_1;
2627 }
2628
2629 info->psize = psize;
2630 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2631
2632 return 0;
2633}
2634
2635
2636static int d40_set_runtime_config(struct dma_chan *chan,
2637 struct dma_slave_config *config)
2638{
2639 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2640 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2641 enum dma_slave_buswidth src_addr_width, dst_addr_width;
2642 dma_addr_t config_addr;
2643 u32 src_maxburst, dst_maxburst;
2644 int ret;
2645
2646 if (d40c->phy_chan == NULL) {
2647 chan_err(d40c, "Channel is not allocated!\n");
2648 return -EINVAL;
2649 }
2650
2651 src_addr_width = config->src_addr_width;
2652 src_maxburst = config->src_maxburst;
2653 dst_addr_width = config->dst_addr_width;
2654 dst_maxburst = config->dst_maxburst;
2655
2656 if (config->direction == DMA_DEV_TO_MEM) {
2657 config_addr = config->src_addr;
2658
2659 if (cfg->dir != DMA_DEV_TO_MEM)
2660 dev_dbg(d40c->base->dev,
2661 "channel was not configured for peripheral "
2662 "to memory transfer (%d) overriding\n",
2663 cfg->dir);
2664 cfg->dir = DMA_DEV_TO_MEM;
2665
		/* The memory side parameters default to the device side ones */
2667 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2668 dst_addr_width = src_addr_width;
2669 if (dst_maxburst == 0)
2670 dst_maxburst = src_maxburst;
2671
2672 } else if (config->direction == DMA_MEM_TO_DEV) {
2673 config_addr = config->dst_addr;
2674
2675 if (cfg->dir != DMA_MEM_TO_DEV)
2676 dev_dbg(d40c->base->dev,
2677 "channel was not configured for memory "
2678 "to peripheral transfer (%d) overriding\n",
2679 cfg->dir);
2680 cfg->dir = DMA_MEM_TO_DEV;
2681
		/* The memory side parameters default to the device side ones */
2683 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2684 src_addr_width = dst_addr_width;
2685 if (src_maxburst == 0)
2686 src_maxburst = dst_maxburst;
2687 } else {
2688 dev_err(d40c->base->dev,
2689 "unrecognized channel direction %d\n",
2690 config->direction);
2691 return -EINVAL;
2692 }
2693
2694 if (config_addr <= 0) {
2695 dev_err(d40c->base->dev, "no address supplied\n");
2696 return -EINVAL;
2697 }
2698
2699 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2700 dev_err(d40c->base->dev,
2701 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2702 src_maxburst,
2703 src_addr_width,
2704 dst_maxburst,
2705 dst_addr_width);
2706 return -EINVAL;
2707 }
2708
2709 if (src_maxburst > 16) {
2710 src_maxburst = 16;
2711 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2712 } else if (dst_maxburst > 16) {
2713 dst_maxburst = 16;
2714 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2715 }
2716
	/* Only bus widths of 1, 2, 4 and 8 bytes are valid */
2718 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2719 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2720 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2721 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2722 !is_power_of_2(src_addr_width) ||
2723 !is_power_of_2(dst_addr_width))
2724 return -EINVAL;
2725
2726 cfg->src_info.data_width = src_addr_width;
2727 cfg->dst_info.data_width = dst_addr_width;
2728
2729 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2730 src_maxburst);
2731 if (ret)
2732 return ret;
2733
2734 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2735 dst_maxburst);
2736 if (ret)
2737 return ret;
2738
	/* Fill in the register values */
2740 if (chan_is_logical(d40c))
2741 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2742 else
2743 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2744
	/* These settings take precedence over the channel defaults */
2746 d40c->runtime_addr = config_addr;
2747 d40c->runtime_direction = config->direction;
2748 dev_dbg(d40c->base->dev,
2749 "configured channel %s for %s, data width %d/%d, "
2750 "maxburst %d/%d elements, LE, no flow control\n",
2751 dma_chan_name(chan),
2752 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2753 src_addr_width, dst_addr_width,
2754 src_maxburst, dst_maxburst);
2755
2756 return 0;
2757}
2758
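/*
 * Client-side usage sketch (illustrative only, not taken from any specific
 * driver): a peripheral driver typically fills in a dma_slave_config and
 * hands it to the channel before preparing transfers, e.g.
 *
 *	struct dma_slave_config conf = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys,	// device FIFO address (placeholder)
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *	dmaengine_slave_config(chan, &conf);
 *
 * which ends up in d40_set_runtime_config() via the device_config hook
 * installed in d40_ops_init() below.
 */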
2759
/* Initialization functions */
2761static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2762 struct d40_chan *chans, int offset,
2763 int num_chans)
2764{
2765 int i = 0;
2766 struct d40_chan *d40c;
2767
2768 INIT_LIST_HEAD(&dma->channels);
2769
2770 for (i = offset; i < offset + num_chans; i++) {
2771 d40c = &chans[i];
2772 d40c->base = base;
2773 d40c->chan.device = dma;
2774
2775 spin_lock_init(&d40c->lock);
2776
2777 d40c->log_num = D40_PHY_CHAN;
2778
2779 INIT_LIST_HEAD(&d40c->done);
2780 INIT_LIST_HEAD(&d40c->active);
2781 INIT_LIST_HEAD(&d40c->queue);
2782 INIT_LIST_HEAD(&d40c->pending_queue);
2783 INIT_LIST_HEAD(&d40c->client);
2784 INIT_LIST_HEAD(&d40c->prepare_queue);
2785
2786 tasklet_init(&d40c->tasklet, dma_tasklet,
2787 (unsigned long) d40c);
2788
2789 list_add_tail(&d40c->chan.device_node,
2790 &dma->channels);
2791 }
2792}
2793
2794static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2795{
2796 if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
2797 dev->device_prep_slave_sg = d40_prep_slave_sg;
2798 dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2799 }
2800
2801 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2802 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2803 dev->directions = BIT(DMA_MEM_TO_MEM);
2804
		/*
		 * The controller can only access addresses on even 32-bit
		 * boundaries, hence the four byte copy alignment.
		 */
2808 dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
2809 }
2810
2811 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2812 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2813
2814 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2815 dev->device_free_chan_resources = d40_free_chan_resources;
2816 dev->device_issue_pending = d40_issue_pending;
2817 dev->device_tx_status = d40_tx_status;
2818 dev->device_config = d40_set_runtime_config;
2819 dev->device_pause = d40_pause;
2820 dev->device_resume = d40_resume;
2821 dev->device_terminate_all = d40_terminate_all;
2822 dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2823 dev->dev = base->dev;
2824}
2825
2826static int __init d40_dmaengine_init(struct d40_base *base,
2827 int num_reserved_chans)
2828{
	int err;
2830
2831 d40_chan_init(base, &base->dma_slave, base->log_chans,
2832 0, base->num_log_chans);
2833
2834 dma_cap_zero(base->dma_slave.cap_mask);
2835 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2836 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2837
2838 d40_ops_init(base, &base->dma_slave);
2839
2840 err = dma_async_device_register(&base->dma_slave);
2841
2842 if (err) {
2843 d40_err(base->dev, "Failed to register slave channels\n");
2844 goto exit;
2845 }
2846
2847 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2848 base->num_log_chans, base->num_memcpy_chans);
2849
2850 dma_cap_zero(base->dma_memcpy.cap_mask);
2851 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2852
2853 d40_ops_init(base, &base->dma_memcpy);
2854
2855 err = dma_async_device_register(&base->dma_memcpy);
2856
2857 if (err) {
2858 d40_err(base->dev,
2859 "Failed to register memcpy only channels\n");
2860 goto unregister_slave;
2861 }
2862
2863 d40_chan_init(base, &base->dma_both, base->phy_chans,
2864 0, num_reserved_chans);
2865
2866 dma_cap_zero(base->dma_both.cap_mask);
2867 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2868 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2870
2871 d40_ops_init(base, &base->dma_both);
2872 err = dma_async_device_register(&base->dma_both);
2873
2874 if (err) {
2875 d40_err(base->dev,
2876 "Failed to register logical and physical capable channels\n");
2877 goto unregister_memcpy;
2878 }
2879 return 0;
2880 unregister_memcpy:
2881 dma_async_device_unregister(&base->dma_memcpy);
2882 unregister_slave:
2883 dma_async_device_unregister(&base->dma_slave);
2884 exit:
2885 return err;
2886}
2887
2888
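/*
 * System sleep: suspend forces a runtime suspend (registers saved, event
 * group clocks gated) and then turns off the optional ESRAM regulator;
 * resume does the reverse.
 */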
2889#ifdef CONFIG_PM_SLEEP
2890static int dma40_suspend(struct device *dev)
2891{
2892 struct d40_base *base = dev_get_drvdata(dev);
2893 int ret;
2894
2895 ret = pm_runtime_force_suspend(dev);
2896 if (ret)
2897 return ret;
2898
2899 if (base->lcpa_regulator)
2900 ret = regulator_disable(base->lcpa_regulator);
2901 return ret;
2902}
2903
2904static int dma40_resume(struct device *dev)
2905{
2906 struct d40_base *base = dev_get_drvdata(dev);
2907 int ret = 0;
2908
2909 if (base->lcpa_regulator) {
2910 ret = regulator_enable(base->lcpa_regulator);
2911 if (ret)
2912 return ret;
2913 }
2914
2915 return pm_runtime_force_resume(dev);
2916}
2917#endif
2918
2919#ifdef CONFIG_PM
2920static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2921 u32 *regaddr, int num, bool save)
2922{
2923 int i;
2924
2925 for (i = 0; i < num; i++) {
2926 void __iomem *addr = baseaddr + regaddr[i];
2927
2928 if (save)
2929 backup[i] = readl_relaxed(addr);
2930 else
2931 writel_relaxed(backup[i], addr);
2932 }
2933}
2934
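/*
 * Save (save == true) or restore the per-channel registers of every
 * non-reserved physical channel, the common registers in d40_backup_regs
 * and, when present, the variant specific register set, around runtime
 * power transitions.
 */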
2935static void d40_save_restore_registers(struct d40_base *base, bool save)
2936{
2937 int i;
2938
2939
2940 for (i = 0; i < base->num_phy_chans; i++) {
2941 void __iomem *addr;
2942 int idx;
2943
2944 if (base->phy_res[i].reserved)
2945 continue;
2946
2947 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
2948 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
2949
2950 dma40_backup(addr, &base->reg_val_backup_chan[idx],
2951 d40_backup_regs_chan,
2952 ARRAY_SIZE(d40_backup_regs_chan),
2953 save);
2954 }
2955
2956
2957 dma40_backup(base->virtbase, base->reg_val_backup,
2958 d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
2959 save);
2960
2961
2962 if (base->gen_dmac.backup)
2963 dma40_backup(base->virtbase, base->reg_val_backup_v4,
2964 base->gen_dmac.backup,
2965 base->gen_dmac.backup_size,
2966 save);
2967}
2968
2969static int dma40_runtime_suspend(struct device *dev)
2970{
2971 struct d40_base *base = dev_get_drvdata(dev);
2972
2973 d40_save_restore_registers(base, true);
2974
2975
2976 if (base->rev != 1)
2977 writel_relaxed(base->gcc_pwr_off_mask,
2978 base->virtbase + D40_DREG_GCC);
2979
2980 return 0;
2981}
2982
2983static int dma40_runtime_resume(struct device *dev)
2984{
2985 struct d40_base *base = dev_get_drvdata(dev);
2986
2987 d40_save_restore_registers(base, false);
2988
2989 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
2990 base->virtbase + D40_DREG_GCC);
2991 return 0;
2992}
2993#endif
2994
2995static const struct dev_pm_ops dma40_pm_ops = {
2996 SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
2997 SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
2998 dma40_runtime_resume,
2999 NULL)
3000};
3001
3002
3003
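/*
 * Set up the physical channel resources: channels marked secure-only in
 * PRSME/PRSMO and channels listed in disabled_channels are reserved, and
 * the number of channels available for allocation is returned.
 */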
3004static int __init d40_phy_res_init(struct d40_base *base)
3005{
3006 int i;
3007 int num_phy_chans_avail = 0;
3008 u32 val[2];
3009 int odd_even_bit = -2;
3010 int gcc = D40_DREG_GCC_ENA;
3011
3012 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3013 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3014
3015 for (i = 0; i < base->num_phy_chans; i++) {
3016 base->phy_res[i].num = i;
3017 odd_even_bit += 2 * ((i % 2) == 0);
3018 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark secure-only channels as occupied */
3020 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3021 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3022 base->phy_res[i].reserved = true;
3023 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3024 D40_DREG_GCC_SRC);
3025 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3026 D40_DREG_GCC_DST);
3027
3028
3029 } else {
3030 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3031 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3032 base->phy_res[i].reserved = false;
3033 num_phy_chans_avail++;
3034 }
3035 spin_lock_init(&base->phy_res[i].lock);
3036 }
3037
	/* Mark disabled channels as occupied */
3039 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3040 int chan = base->plat_data->disabled_channels[i];
3041
3042 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3043 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3044 base->phy_res[chan].reserved = true;
3045 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3046 D40_DREG_GCC_SRC);
3047 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3048 D40_DREG_GCC_DST);
3049 num_phy_chans_avail--;
3050 }
3051
	/* Mark channels that should use soft LLI */
3053 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3054 int chan = base->plat_data->soft_lli_chans[i];
3055
3056 base->phy_res[chan].use_soft_lli = true;
3057 }
3058
3059 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3060 num_phy_chans_avail, base->num_phy_chans);
3061
	/* Verify the channel mode settings (PRTYP) for the available channels */
3063 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3064
3065 for (i = 0; i < base->num_phy_chans; i++) {
3066
3067 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3068 (val[0] & 0x3) != 1)
3069 dev_info(base->dev,
3070 "[%s] INFO: channel %d is misconfigured (%d)\n",
3071 __func__, i, val[0] & 0x3);
3072
3073 val[0] = val[0] >> 2;
3074 }
3075
	/*
	 * Enable all clocks initially. gcc_pwr_off_mask records the event
	 * group clocks that must stay enabled for the reserved channels when
	 * the rest are gated off again at runtime suspend.
	 */
3082 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3083 base->gcc_pwr_off_mask = gcc;
3084
3085 return num_phy_chans_avail;
3086}
3087
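/*
 * Identify the controller from its AMBA PrimeCell ID registers, derive the
 * hardware revision and channel counts, and allocate the d40_base structure
 * with the physical, logical and memcpy channel arrays placed directly
 * after it.
 */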
3088static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3089{
3090 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3091 struct clk *clk;
3092 void __iomem *virtbase;
3093 struct resource *res;
3094 struct d40_base *base;
3095 int num_log_chans;
3096 int num_phy_chans;
3097 int num_memcpy_chans;
3098 int clk_ret = -EINVAL;
3099 int i;
3100 u32 pid;
3101 u32 cid;
3102 u8 rev;
3103
3104 clk = clk_get(&pdev->dev, NULL);
3105 if (IS_ERR(clk)) {
3106 d40_err(&pdev->dev, "No matching clock found\n");
3107 goto check_prepare_enabled;
3108 }
3109
3110 clk_ret = clk_prepare_enable(clk);
3111 if (clk_ret) {
3112 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3113 goto disable_unprepare;
3114 }
3115
3116
3117 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3118 if (!res)
3119 goto disable_unprepare;
3120
3121 if (request_mem_region(res->start, resource_size(res),
3122 D40_NAME " I/O base") == NULL)
3123 goto release_region;
3124
3125 virtbase = ioremap(res->start, resource_size(res));
3126 if (!virtbase)
3127 goto release_region;
3128
	/* Read the AMBA PrimeCell and peripheral IDs at the top of the window */
3130 for (pid = 0, i = 0; i < 4; i++)
3131 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3132 & 255) << (i * 8);
3133 for (cid = 0, i = 0; i < 4; i++)
3134 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3135 & 255) << (i * 8);
3136
3137 if (cid != AMBA_CID) {
3138 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3139 goto unmap_io;
3140 }
3141 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3142 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3143 AMBA_MANF_BITS(pid),
3144 AMBA_VENDOR_ST);
3145 goto unmap_io;
3146 }
3147

	/*
	 * The hardware revision is taken from the PrimeCell peripheral ID.
	 * Revisions below 2 correspond to early silicon that this driver
	 * does not support.
	 */
3156 rev = AMBA_REV_BITS(pid);
3157 if (rev < 2) {
		d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev);
3159 goto unmap_io;
3160 }
3161
	/* The number of physical channels on this HW */
3163 if (plat_data->num_of_phy_chans)
3164 num_phy_chans = plat_data->num_of_phy_chans;
3165 else
3166 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3167
	/* The number of channels used for memcpy */
3169 if (plat_data->num_of_memcpy_chans)
3170 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3171 else
3172 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3173
3174 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3175
3176 dev_info(&pdev->dev,
3177 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3178 rev, &res->start, num_phy_chans, num_log_chans);
3179
3180 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3181 (num_phy_chans + num_log_chans + num_memcpy_chans) *
3182 sizeof(struct d40_chan), GFP_KERNEL);
3183
3184 if (base == NULL)
3185 goto unmap_io;
3186
3187 base->rev = rev;
3188 base->clk = clk;
3189 base->num_memcpy_chans = num_memcpy_chans;
3190 base->num_phy_chans = num_phy_chans;
3191 base->num_log_chans = num_log_chans;
3192 base->phy_start = res->start;
3193 base->phy_size = resource_size(res);
3194 base->virtbase = virtbase;
3195 base->plat_data = plat_data;
3196 base->dev = &pdev->dev;
3197 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3198 base->log_chans = &base->phy_chans[num_phy_chans];
3199
	/* 14 physical channels indicates the v4b register layout */
	if (base->plat_data->num_of_phy_chans == 14) {
3201 base->gen_dmac.backup = d40_backup_regs_v4b;
3202 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3203 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3204 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3205 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3206 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3207 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3208 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3209 base->gen_dmac.il = il_v4b;
3210 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3211 base->gen_dmac.init_reg = dma_init_reg_v4b;
3212 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3213 } else {
3214 if (base->rev >= 3) {
3215 base->gen_dmac.backup = d40_backup_regs_v4a;
3216 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3217 }
3218 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3219 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3220 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3221 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3222 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3223 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3224 base->gen_dmac.il = il_v4a;
3225 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3226 base->gen_dmac.init_reg = dma_init_reg_v4a;
3227 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3228 }
3229
3230 base->phy_res = kcalloc(num_phy_chans,
3231 sizeof(*base->phy_res),
3232 GFP_KERNEL);
3233 if (!base->phy_res)
3234 goto free_base;
3235
3236 base->lookup_phy_chans = kcalloc(num_phy_chans,
3237 sizeof(*base->lookup_phy_chans),
3238 GFP_KERNEL);
3239 if (!base->lookup_phy_chans)
3240 goto free_phy_res;
3241
3242 base->lookup_log_chans = kcalloc(num_log_chans,
3243 sizeof(*base->lookup_log_chans),
3244 GFP_KERNEL);
3245 if (!base->lookup_log_chans)
3246 goto free_phy_chans;
3247
3248 base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3249 sizeof(d40_backup_regs_chan),
3250 GFP_KERNEL);
3251 if (!base->reg_val_backup_chan)
3252 goto free_log_chans;
3253
3254 base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3255 * D40_LCLA_LINK_PER_EVENT_GRP,
3256 sizeof(*base->lcla_pool.alloc_map),
3257 GFP_KERNEL);
3258 if (!base->lcla_pool.alloc_map)
3259 goto free_backup_chan;
3260
3261 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3262 0, SLAB_HWCACHE_ALIGN,
3263 NULL);
3264 if (base->desc_slab == NULL)
3265 goto free_map;
3266
3267 return base;
3268 free_map:
3269 kfree(base->lcla_pool.alloc_map);
3270 free_backup_chan:
3271 kfree(base->reg_val_backup_chan);
3272 free_log_chans:
3273 kfree(base->lookup_log_chans);
3274 free_phy_chans:
3275 kfree(base->lookup_phy_chans);
3276 free_phy_res:
3277 kfree(base->phy_res);
3278 free_base:
3279 kfree(base);
3280 unmap_io:
3281 iounmap(virtbase);
3282 release_region:
3283 release_mem_region(res->start, resource_size(res));
3284 check_prepare_enabled:
3285 if (!clk_ret)
3286 disable_unprepare:
3287 clk_disable_unprepare(clk);
3288 if (!IS_ERR(clk))
3289 clk_put(clk);
3290 return NULL;
3291}
3292
3293static void __init d40_hw_init(struct d40_base *base)
3294{
3295
3296 int i;
3297 u32 prmseo[2] = {0, 0};
3298 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3299 u32 pcmis = 0;
3300 u32 pcicr = 0;
3301 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3302 u32 reg_size = base->gen_dmac.init_reg_size;
3303
3304 for (i = 0; i < reg_size; i++)
3305 writel(dma_init_reg[i].val,
3306 base->virtbase + dma_init_reg[i].reg);
3307
	/* Build the mode, activation and interrupt mask values, MSB first */
3309 for (i = 0; i < base->num_phy_chans; i++) {
3310
3311 activeo[i % 2] = activeo[i % 2] << 2;
3312
3313 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3314 == D40_ALLOC_PHY) {
3315 activeo[i % 2] |= 3;
3316 continue;
3317 }
3318
		/* Enable the interrupt for this channel */
3320 pcmis = (pcmis << 1) | 1;
3321
		/* Clear any pending interrupt for this channel */
3323 pcicr = (pcicr << 1) | 1;
3324
		/* Set the channel to physical mode */
3326 prmseo[i % 2] = prmseo[i % 2] << 2;
3327 prmseo[i % 2] |= 1;
3328
3329 }
3330
3331 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3332 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3333 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3334 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3335
3336
3337 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3338
3339
3340 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3341
	/* The init register list is only needed during initialization */
3343 base->gen_dmac.init_reg = NULL;
3344 base->gen_dmac.init_reg_size = 0;
3345}
3346
3347static int __init d40_lcla_allocate(struct d40_base *base)
3348{
3349 struct d40_lcla_pool *pool = &base->lcla_pool;
3350 unsigned long *page_list;
3351 int i, j;
3352 int ret;
3353
	/*
	 * The LCLA area must be 256 KiB aligned (LCLA_ALIGNMENT). Rather
	 * than over-allocating, keep allocating page blocks until one
	 * happens to meet the alignment, then free the rejected ones.
	 */
3359 page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
3360 sizeof(*page_list),
3361 GFP_KERNEL);
3362 if (!page_list)
3363 return -ENOMEM;
3364
	/* One KiB of LCLA space is needed per physical channel */
3366 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3367
3368 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3369 page_list[i] = __get_free_pages(GFP_KERNEL,
3370 base->lcla_pool.pages);
3371 if (!page_list[i]) {
3372
3373 d40_err(base->dev, "Failed to allocate %d pages.\n",
3374 base->lcla_pool.pages);
3375 ret = -ENOMEM;
3376
3377 for (j = 0; j < i; j++)
3378 free_pages(page_list[j], base->lcla_pool.pages);
3379 goto free_page_list;
3380 }
3381
3382 if ((virt_to_phys((void *)page_list[i]) &
3383 (LCLA_ALIGNMENT - 1)) == 0)
3384 break;
3385 }
3386
3387 for (j = 0; j < i; j++)
3388 free_pages(page_list[j], base->lcla_pool.pages);
3389
3390 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3391 base->lcla_pool.base = (void *)page_list[i];
3392 } else {
		/*
		 * No aligned block was found within MAX_LCLA_ALLOC_ATTEMPTS
		 * tries; fall back to an over-sized allocation and align the
		 * pointer by hand.
		 */
3397 dev_warn(base->dev,
3398 "[%s] Failed to get %d pages @ 18 bit align.\n",
3399 __func__, base->lcla_pool.pages);
3400 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3401 base->num_phy_chans +
3402 LCLA_ALIGNMENT,
3403 GFP_KERNEL);
3404 if (!base->lcla_pool.base_unaligned) {
3405 ret = -ENOMEM;
3406 goto free_page_list;
3407 }
3408
3409 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3410 LCLA_ALIGNMENT);
3411 }
3412
3413 pool->dma_addr = dma_map_single(base->dev, pool->base,
3414 SZ_1K * base->num_phy_chans,
3415 DMA_TO_DEVICE);
3416 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3417 pool->dma_addr = 0;
3418 ret = -ENOMEM;
3419 goto free_page_list;
3420 }
3421
3422 writel(virt_to_phys(base->lcla_pool.base),
3423 base->virtbase + D40_DREG_LCLA);
3424 ret = 0;
3425 free_page_list:
3426 kfree(page_list);
3427 return ret;
3428}
3429
3430static int __init d40_of_probe(struct platform_device *pdev,
3431 struct device_node *np)
3432{
3433 struct stedma40_platform_data *pdata;
3434 int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3435 const __be32 *list;
3436
3437 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3438 if (!pdata)
3439 return -ENOMEM;
3440
	/* If absent, the number of channels is read from the hardware */
3442 of_property_read_u32(np, "dma-channels", &num_phy);
3443 if (num_phy > 0)
3444 pdata->num_of_phy_chans = num_phy;
3445
3446 list = of_get_property(np, "memcpy-channels", &num_memcpy);
3447 num_memcpy /= sizeof(*list);
3448
3449 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3450 d40_err(&pdev->dev,
3451 "Invalid number of memcpy channels specified (%d)\n",
3452 num_memcpy);
3453 return -EINVAL;
3454 }
3455 pdata->num_of_memcpy_chans = num_memcpy;
3456
3457 of_property_read_u32_array(np, "memcpy-channels",
3458 dma40_memcpy_channels,
3459 num_memcpy);
3460
3461 list = of_get_property(np, "disabled-channels", &num_disabled);
3462 num_disabled /= sizeof(*list);
3463
3464 if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3465 d40_err(&pdev->dev,
3466 "Invalid number of disabled channels specified (%d)\n",
3467 num_disabled);
3468 return -EINVAL;
3469 }
3470
3471 of_property_read_u32_array(np, "disabled-channels",
3472 pdata->disabled_channels,
3473 num_disabled);
3474 pdata->disabled_channels[num_disabled] = -1;
3475
3476 pdev->dev.platform_data = pdata;
3477
3478 return 0;
3479}
3480
3481static int __init d40_probe(struct platform_device *pdev)
3482{
3483 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3484 struct device_node *np = pdev->dev.of_node;
3485 int ret = -ENOENT;
3486 struct d40_base *base;
3487 struct resource *res;
3488 int num_reserved_chans;
3489 u32 val;
3490
3491 if (!plat_data) {
3492 if (np) {
3493 if (d40_of_probe(pdev, np)) {
3494 ret = -ENOMEM;
3495 goto report_failure;
3496 }
3497 } else {
3498 d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3499 goto report_failure;
3500 }
3501 }
3502
3503 base = d40_hw_detect_init(pdev);
3504 if (!base)
3505 goto report_failure;
3506
3507 num_reserved_chans = d40_phy_res_init(base);
3508
3509 platform_set_drvdata(pdev, base);
3510
3511 spin_lock_init(&base->interrupt_lock);
3512 spin_lock_init(&base->execmd_lock);
3513
	/* Get the I/O resource for the logical channel parameter address (LCPA) */
3515 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3516 if (!res) {
3517 ret = -ENOENT;
3518 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3519 goto destroy_cache;
3520 }
3521 base->lcpa_size = resource_size(res);
3522 base->phy_lcpa = res->start;
3523
3524 if (request_mem_region(res->start, resource_size(res),
3525 D40_NAME " I/O lcpa") == NULL) {
3526 ret = -EBUSY;
3527 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3528 goto destroy_cache;
3529 }
3530
	/* Warn if the LCPA register is already programmed with a different address */
3532 val = readl(base->virtbase + D40_DREG_LCPA);
3533 if (res->start != val && val != 0) {
3534 dev_warn(&pdev->dev,
3535 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3536 __func__, val, &res->start);
3537 } else
3538 writel(res->start, base->virtbase + D40_DREG_LCPA);
3539
3540 base->lcpa_base = ioremap(res->start, resource_size(res));
3541 if (!base->lcpa_base) {
3542 ret = -ENOMEM;
3543 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3544 goto destroy_cache;
3545 }
3546
3547 if (base->plat_data->use_esram_lcla) {
3548 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3549 "lcla_esram");
3550 if (!res) {
3551 ret = -ENOENT;
3552 d40_err(&pdev->dev,
3553 "No \"lcla_esram\" memory resource\n");
3554 goto destroy_cache;
3555 }
3556 base->lcla_pool.base = ioremap(res->start,
3557 resource_size(res));
3558 if (!base->lcla_pool.base) {
3559 ret = -ENOMEM;
3560 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3561 goto destroy_cache;
3562 }
3563 writel(res->start, base->virtbase + D40_DREG_LCLA);
3564
3565 } else {
3566 ret = d40_lcla_allocate(base);
3567 if (ret) {
3568 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3569 goto destroy_cache;
3570 }
3571 }
3572
3573 spin_lock_init(&base->lcla_pool.lock);
3574
3575 base->irq = platform_get_irq(pdev, 0);
3576
3577 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3578 if (ret) {
3579 d40_err(&pdev->dev, "No IRQ defined\n");
3580 goto destroy_cache;
3581 }
3582
3583 if (base->plat_data->use_esram_lcla) {
		/* The LCLA area in ESRAM is supplied by a separate regulator */
3585 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3586 if (IS_ERR(base->lcpa_regulator)) {
3587 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3588 ret = PTR_ERR(base->lcpa_regulator);
3589 base->lcpa_regulator = NULL;
3590 goto destroy_cache;
3591 }
3592
3593 ret = regulator_enable(base->lcpa_regulator);
3594 if (ret) {
3595 d40_err(&pdev->dev,
3596 "Failed to enable lcpa_regulator\n");
3597 regulator_put(base->lcpa_regulator);
3598 base->lcpa_regulator = NULL;
3599 goto destroy_cache;
3600 }
3601 }
3602
3603 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3604
3605 pm_runtime_irq_safe(base->dev);
3606 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3607 pm_runtime_use_autosuspend(base->dev);
3608 pm_runtime_mark_last_busy(base->dev);
3609 pm_runtime_set_active(base->dev);
3610 pm_runtime_enable(base->dev);
3611
3612 ret = d40_dmaengine_init(base, num_reserved_chans);
3613 if (ret)
3614 goto destroy_cache;
3615
3616 base->dev->dma_parms = &base->dma_parms;
3617 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3618 if (ret) {
3619 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3620 goto destroy_cache;
3621 }
3622
3623 d40_hw_init(base);
3624
3625 if (np) {
3626 ret = of_dma_controller_register(np, d40_xlate, NULL);
3627 if (ret)
3628 dev_err(&pdev->dev,
3629 "could not register of_dma_controller\n");
3630 }
3631
3632 dev_info(base->dev, "initialized\n");
3633 return 0;
3634 destroy_cache:
3635 kmem_cache_destroy(base->desc_slab);
3636 if (base->virtbase)
3637 iounmap(base->virtbase);
3638
3639 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3640 iounmap(base->lcla_pool.base);
3641 base->lcla_pool.base = NULL;
3642 }
3643
3644 if (base->lcla_pool.dma_addr)
3645 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3646 SZ_1K * base->num_phy_chans,
3647 DMA_TO_DEVICE);
3648
3649 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3650 free_pages((unsigned long)base->lcla_pool.base,
3651 base->lcla_pool.pages);
3652
3653 kfree(base->lcla_pool.base_unaligned);
3654
3655 if (base->phy_lcpa)
3656 release_mem_region(base->phy_lcpa,
3657 base->lcpa_size);
3658 if (base->phy_start)
3659 release_mem_region(base->phy_start,
3660 base->phy_size);
3661 if (base->clk) {
3662 clk_disable_unprepare(base->clk);
3663 clk_put(base->clk);
3664 }
3665
3666 if (base->lcpa_regulator) {
3667 regulator_disable(base->lcpa_regulator);
3668 regulator_put(base->lcpa_regulator);
3669 }
3670
3671 kfree(base->lcla_pool.alloc_map);
3672 kfree(base->lookup_log_chans);
3673 kfree(base->lookup_phy_chans);
3674 kfree(base->phy_res);
3675 kfree(base);
3676 report_failure:
3677 d40_err(&pdev->dev, "probe failed\n");
3678 return ret;
3679}
3680
3681static const struct of_device_id d40_match[] = {
3682 { .compatible = "stericsson,dma40", },
3683 {}
3684};
3685
3686static struct platform_driver d40_driver = {
3687 .driver = {
3688 .name = D40_NAME,
3689 .pm = &dma40_pm_ops,
3690 .of_match_table = d40_match,
3691 },
3692};
3693
3694static int __init stedma40_init(void)
3695{
3696 return platform_driver_probe(&d40_driver, d40_probe);
3697}
3698subsys_initcall(stedma40_init);
3699